├── .dev_scripts ├── build_docs.sh └── dockerci.sh ├── .github ├── ISSUE_TEMPLATE │ ├── bug-report.yaml │ ├── config.yml │ ├── doc-demo-bug.yaml │ └── feature-requests.yaml ├── PULL_REQUEST_TEMPLATE.md └── workflows │ ├── citest.yaml │ ├── lint.yaml │ └── publish.yaml ├── .gitignore ├── .pre-commit-config.yaml ├── .pre-commit-config_local.yaml ├── .streamlit └── config.toml ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── MANIFEST.in ├── Makefile ├── README.md ├── README_CN.md ├── apps ├── agentfabric │ ├── README.md │ ├── README_CN.md │ ├── __init__.py │ ├── app.py │ ├── appBot.py │ ├── assets │ │ ├── app.css │ │ ├── appBot.css │ │ ├── bot.jpg │ │ └── user.jpg │ ├── builder_core.py │ ├── config │ │ ├── builder_config.json │ │ ├── builder_config_ci.json │ │ ├── builder_config_template.json │ │ ├── builder_config_wuxia.json │ │ ├── custom_bot_avatar.png │ │ ├── model_config.json │ │ └── tool_config.json │ ├── config_utils.py │ ├── gradio_utils.py │ ├── help_tools.py │ ├── i18n.py │ ├── openapi_example │ │ ├── aigc_wordart_semantic.json │ │ └── aigc_wordart_texture.json │ ├── publish_util.py │ ├── requirements.txt │ ├── response.json │ ├── server.py │ ├── server_logging.py │ ├── server_utils.py │ ├── user_core.py │ └── version.py ├── codexgraph_agent │ ├── README.md │ ├── build_requirements.txt │ ├── codexgraph.png │ ├── help.py │ ├── img │ │ ├── README │ │ │ ├── code_chat2.png │ │ │ ├── code_commentor1.png │ │ │ ├── code_commentor2.png │ │ │ ├── code_debug_0.png │ │ │ ├── code_debug_7.png │ │ │ ├── code_debug_8.png │ │ │ ├── code_debug_9.png │ │ │ ├── code_gen0.png │ │ │ ├── code_gen1.png │ │ │ └── code_unittester.png │ │ └── framework.png │ ├── pages │ │ ├── code_chat.py │ │ ├── code_commenter.py │ │ ├── code_debugger.py │ │ ├── code_generator.py │ │ ├── code_unittester.py │ │ └── components │ │ │ ├── page.py │ │ │ ├── setting.py │ │ │ ├── sidebar.py │ │ │ └── states.py │ ├── prompt │ │ ├── code_chat │ │ │ └── python │ │ │ │ ├── generate_prompt.txt │ │ │ │ ├── start_prompt_cypher.txt │ │ │ │ ├── start_prompt_primary.txt │ │ │ │ ├── system_prompt_cypher.txt │ │ │ │ └── system_prompt_primary.txt │ │ ├── code_commenter │ │ │ └── python │ │ │ │ ├── generate_prompt.txt │ │ │ │ ├── start_prompt_cypher.txt │ │ │ │ ├── start_prompt_primary.txt │ │ │ │ ├── system_prompt_cypher.txt │ │ │ │ └── system_prompt_primary.txt │ │ ├── code_debugger │ │ │ └── python │ │ │ │ ├── generate_prompt.txt │ │ │ │ ├── start_prompt_cypher.txt │ │ │ │ ├── start_prompt_cypher_buggy_loc.txt │ │ │ │ ├── start_prompt_primary.txt │ │ │ │ ├── system_prompt_cypher.txt │ │ │ │ └── system_prompt_primary.txt │ │ ├── code_generator │ │ │ └── python │ │ │ │ ├── generate_prompt.txt │ │ │ │ ├── start_prompt_cypher.txt │ │ │ │ ├── start_prompt_primary.txt │ │ │ │ ├── system_prompt_cypher.txt │ │ │ │ └── system_prompt_primary.txt │ │ ├── code_unittester │ │ │ └── python │ │ │ │ ├── generate_prompt.txt │ │ │ │ ├── start_prompt_cypher.txt │ │ │ │ ├── start_prompt_primary.txt │ │ │ │ ├── system_prompt_cypher.txt │ │ │ │ └── system_prompt_primary.txt │ │ └── graph_database │ │ │ └── python │ │ │ ├── example.txt │ │ │ └── schema.txt │ ├── requirements.txt │ ├── run.py │ └── setting.json ├── datascience_assistant │ ├── README.md │ └── app.py ├── mcp-playground │ ├── app.py │ ├── assets │ │ ├── deepseek.png │ │ ├── logo.png │ │ ├── mcp.png │ │ ├── meta.webp │ │ ├── modelscope-mcp.png │ │ └── qwen.png │ ├── config.py │ ├── env.py │ ├── mcp_client.py │ ├── requirements.txt │ ├── tools │ │ └── oss.py │ └── ui_components │ │ ├── 
add_mcp_server_button.py │ │ ├── config_form.py │ │ └── mcp_servers_button.py ├── mobile_agent │ ├── README.md │ ├── requirements.txt │ └── run.py ├── msgpt │ ├── app.py │ ├── gradio_chatbot.py │ ├── main.css │ ├── predict.py │ └── run_msgpt.sh └── multi_roles_chat_room │ ├── __init__.py │ ├── app.py │ ├── assets │ ├── app.css │ └── app.js │ ├── requirements.txt │ ├── resources │ ├── default_girl.png │ ├── fanxian.jpg │ ├── guyi.png │ ├── haitangduoduo.jpg │ ├── linwaner.jpeg │ ├── liyunsi.png │ ├── silili.jpeg │ ├── zhandoudou.jpg │ └── zhengziyan.png │ ├── role_core.py │ └── story_holder.py ├── config ├── .env.template ├── cfg_model_template.json └── cfg_tool_template.json ├── docker ├── Dockerfile ├── build_docker.sh ├── docker-compose.yaml ├── dockerfile.agentfabric ├── tool_manager.dockerfile └── tool_node.dockerfile ├── docs ├── Makefile ├── README.md ├── make.bat ├── resource │ ├── agentfabric_0.png │ ├── agentfabric_1.png │ ├── agentfabric_2.png │ ├── agentfabric_3.png │ ├── agentfabric_4.png │ ├── local_deploy.png │ ├── local_deploy_agent.png │ ├── terminal-file.png │ └── tool-readme.png ├── source │ ├── .readthedocs.yaml │ ├── agents │ │ └── data_science_assistant.md │ ├── conf.py │ ├── contributing │ │ └── tool_contribution_guide.md │ ├── deployment │ │ └── local_deploy.md │ ├── get_started │ │ ├── installation.md │ │ ├── introduction.md │ │ └── quickstart.md │ ├── index.rst │ ├── llms │ │ ├── llama3.1_tool_calling.md │ │ └── qwen2_tool_calling.md │ ├── modules │ │ ├── callback.md │ │ ├── llm.md │ │ ├── memory.md │ │ ├── retrieve.md │ │ └── tool.md │ ├── training_llms │ │ ├── train.md │ │ └── train_agentfabric_llm_tool_use.md │ └── use_cases │ │ ├── application.md │ │ ├── code_interpreter_case.md │ │ ├── llama3_for_agent.md │ │ └── openAPI_for_agent.md └── source_en │ ├── .readthedocs.yaml │ ├── agents │ └── data_science_assistant.md │ ├── conf.py │ ├── contributing │ └── tool_contribution_guide.md │ ├── deployment │ └── local_deploy.md │ ├── get_started │ ├── installation.md │ ├── introduction.md │ └── quickstart.md │ ├── index.rst │ ├── llms │ ├── llama3.1_tool_calling.md │ └── qwen2_tool_calling.md │ ├── modules │ ├── callback.md │ ├── llm.md │ ├── memory.md │ ├── retrieve.md │ └── tool.md │ ├── training_llms │ ├── train.md │ └── train_agentfabric_llm_tool_use.md │ └── use_cases │ ├── application.md │ ├── code_interpreter_case.md │ ├── llama3_for_agent.md │ └── openAPI_for_agent.md ├── examples ├── agents │ ├── data_science_assistant.ipynb │ ├── modelscopegpt_agent.ipynb │ ├── multi-agents │ │ ├── demo_multi_role_videogen.ipynb │ │ ├── multi_role_chatroom.py │ │ ├── simple_chat_with_local.py │ │ └── simple_chat_with_ray.py │ └── simple_agent.ipynb ├── apps │ ├── modelscope_agentfabric.ipynb │ └── multi_roles_chat_room.ipynb ├── llms │ ├── finetune_llm │ │ ├── llm_infer.py │ │ ├── llm_sft.py │ │ ├── requirements.txt │ │ ├── scripts │ │ │ └── train │ │ │ │ ├── ds_stage_2.json │ │ │ │ ├── ds_stage_3.json │ │ │ │ ├── run_ms_agent_single.sh │ │ │ │ ├── run_qwen_ddp.sh │ │ │ │ ├── run_qwen_ds_stage2.sh │ │ │ │ └── run_qwen_ds_stage3.sh │ │ └── utils │ │ │ ├── __init__.py │ │ │ ├── dataset.py │ │ │ ├── models.py │ │ │ └── utils.py │ ├── local_llm.ipynb │ ├── multi_llm_with_alpha_umi │ │ ├── multi_llm_with_alpha_umi.ipynb │ │ ├── run_deploy.sh │ │ ├── run_test.sh │ │ └── test_alpha_umi.py │ ├── ollama.ipynb │ └── vllm_glm4.ipynb ├── mcp_apps │ ├── config.json │ └── web_generator.py ├── memory & knowledge │ ├── history_persistence.ipynb │ ├── llamaindex_rag.ipynb │ ├── 
multi_round_with_history.ipynb │ └── simple_rag.ipynb └── tools │ ├── code_interpreter.ipynb │ ├── langchian_as_third_party_tools.ipynb │ ├── local_new_tool.ipynb │ ├── openapi_schema_tool.ipynb │ └── register_new_tool.ipynb ├── modelscope_agent ├── __init__.py ├── agent.py ├── agent_env_util.py ├── agents │ ├── __init__.py │ ├── agent_builder.py │ ├── agent_with_mcp.py │ ├── alpha_umi.py │ ├── codexgraph_agent │ │ ├── __init__.py │ │ ├── cypher_agent.py │ │ ├── prompt.py │ │ ├── task │ │ │ ├── __init__.py │ │ │ ├── code_chat.py │ │ │ ├── code_debugger.py │ │ │ └── code_general.py │ │ └── utils │ │ │ ├── __init__.py │ │ │ ├── code_utils.py │ │ │ ├── cypher_utils.py │ │ │ └── prompt_utils.py │ ├── data_science_assistant.py │ ├── gen_keyword.py │ ├── mobile_agent_v2 │ │ ├── __init__.py │ │ ├── mobile_agent_v2.py │ │ └── prompt.py │ ├── multi_role_play.py │ └── role_play.py ├── agents_registry.py ├── callbacks │ ├── __init__.py │ ├── base.py │ └── run_state.py ├── constants.py ├── environment │ ├── __init__.py │ ├── android_adb │ │ ├── __init__.py │ │ ├── android_adb_env.py │ │ └── utils.py │ ├── environment.py │ └── graph_database │ │ ├── __init__.py │ │ ├── ast_search │ │ ├── __init__.py │ │ ├── ast_manage.py │ │ └── ast_utils.py │ │ ├── build.py │ │ ├── graph_database.py │ │ └── indexer │ │ ├── __init__.py │ │ ├── index_utils.py │ │ ├── my_client.py │ │ ├── my_graph_db.py │ │ ├── run_index_single.py │ │ ├── shallow_indexer.py │ │ └── sourcetraildb.py ├── llm │ ├── __init__.py │ ├── base.py │ ├── dashscope.py │ ├── modelscope.py │ ├── ollama.py │ ├── openai.py │ ├── openai_fn_call.py │ ├── utils │ │ ├── __init__.py │ │ ├── function_call_with_raw_prompt.py │ │ ├── llm_templates.py │ │ └── utils.py │ ├── vllm.py │ └── zhipu.py ├── memory │ ├── __init__.py │ ├── base.py │ ├── memory_with_file_knowledge.py │ ├── memory_with_rag.py │ └── memory_with_retrieval_knowledge.py ├── multi_agents_utils │ ├── README.md │ ├── README_CN.md │ ├── __init__.py │ └── executors │ │ ├── __init__.py │ │ ├── local.py │ │ └── ray.py ├── rag │ ├── README_zh.md │ ├── __init__.py │ ├── base.py │ ├── emb.py │ ├── knowledge.py │ ├── llm.py │ ├── rag_template │ │ ├── __init__.py │ │ ├── best_match.py │ │ └── fusion.py │ └── reader │ │ ├── __init__.py │ │ └── image.py ├── schemas.py ├── storage │ ├── __init__.py │ ├── base.py │ ├── file_storage.py │ └── vector_storage.py ├── task_center.py ├── tools │ ├── __init__.py │ ├── amap_weather.py │ ├── base.py │ ├── code_interpreter │ │ ├── AlibabaPuHuiTi-3-45-Light.ttf │ │ ├── __init__.py │ │ ├── code_interpreter.py │ │ ├── code_interpreter_init_kernel.py │ │ └── code_interpreter_nb.py │ ├── contrib │ │ ├── __init__.py │ │ └── demo │ │ │ ├── README.md │ │ │ ├── __init__.py │ │ │ ├── renew_aliyun_instance.py │ │ │ └── test_case.py │ ├── dashscope_tools │ │ ├── __init__.py │ │ ├── image_enhancement.py │ │ ├── image_generation.py │ │ ├── image_generation_lite.py │ │ ├── paraformer_asr_tool.py │ │ ├── qwen_vl.py │ │ ├── sambert_tts_tool.py │ │ ├── style_repaint.py │ │ └── wordart_tool.py │ ├── doc_parser.py │ ├── hf_tool.py │ ├── langchain_proxy_tool.py │ ├── mcp │ │ ├── __init__.py │ │ ├── mcp_client.py │ │ ├── mcp_manager.py │ │ ├── servers │ │ │ ├── __init__.py │ │ │ ├── crawl4ai │ │ │ │ ├── README.md │ │ │ │ ├── __init__.py │ │ │ │ ├── config.json │ │ │ │ ├── requirements.txt │ │ │ │ └── server.py │ │ │ └── notebook │ │ │ │ ├── __init__.py │ │ │ │ ├── config.json │ │ │ │ └── server.py │ │ └── utils.py │ ├── metagpt_tools │ │ ├── libs │ │ │ ├── data_preprocess.py │ │ │ └── 
feature_engineering.py │ │ ├── task_type.py │ │ ├── tool_convert.py │ │ ├── tool_data_type.py │ │ ├── tool_recommend.py │ │ └── tool_registry.py │ ├── modelscope_tools │ │ ├── __init__.py │ │ ├── image_chat_tool.py │ │ ├── pipeline_tool.py │ │ ├── text_address_tool.py │ │ ├── text_ie_tool.py │ │ ├── text_ner_tool.py │ │ ├── text_to_speech_tool.py │ │ ├── text_to_video_tool.py │ │ ├── translation_en2zh_tool.py │ │ └── translation_zh2en_tool.py │ ├── openapi_plugin.py │ ├── rapidapi_tools │ │ ├── Finance │ │ │ ├── __init__.py │ │ │ └── current_exchage.py │ │ ├── Modelscope │ │ │ ├── __init__.py │ │ │ ├── pipeline_tool.py │ │ │ └── text_ie_tool.py │ │ ├── Movies │ │ │ ├── __init__.py │ │ │ └── movie_tv_music_search_and_download.py │ │ ├── Number │ │ │ ├── __init__.py │ │ │ └── numbers.py │ │ ├── Translate │ │ │ ├── __init__.py │ │ │ └── google_translate.py │ │ ├── __init__.py │ │ └── basetool_for_alpha_umi.py │ ├── similarity_search.py │ ├── storage_proxy_tool.py │ ├── utils │ │ ├── __init__.py │ │ ├── openapi_utils.py │ │ ├── oss.py │ │ └── output_wrapper.py │ ├── web_browser.py │ └── web_search │ │ ├── __init__.py │ │ ├── search_util.py │ │ ├── searcher │ │ ├── __init__.py │ │ ├── base_searcher.py │ │ ├── bing.py │ │ └── kuake.py │ │ └── web_search.py ├── utils │ ├── __init__.py │ ├── base64_utils.py │ ├── git.py │ ├── import_utils.py │ ├── logger.py │ ├── nltk │ │ ├── averaged_perceptron_tagger.zip │ │ ├── punkt.zip │ │ └── stopwords.zip │ ├── nltk_utils.py │ ├── parse_doc.py │ ├── qwen.tiktoken │ ├── qwen_agent │ │ ├── __init__.py │ │ ├── base.py │ │ ├── base_fn_call.py │ │ ├── fncall_prompts │ │ │ ├── __init__.py │ │ │ ├── base_fncall_prompt.py │ │ │ ├── nous_fncall_prompt.py │ │ │ └── qwen_fncall_prompt.py │ │ ├── schema.py │ │ ├── settings.py │ │ ├── tokenization_qwen.py │ │ └── utils.py │ ├── retry.py │ ├── tokenization_utils.py │ └── utils.py └── version.py ├── modelscope_agent_servers ├── README.md ├── __init__.py ├── assistant_server │ ├── __init__.py │ ├── api.py │ ├── models.py │ └── utils.py ├── requirements.txt ├── service_utils.py ├── tool_manager_server │ ├── __init__.py │ ├── api.py │ ├── connections.py │ ├── models.py │ ├── sandbox.py │ └── utils.py └── tool_node_server │ ├── __init__.py │ ├── api.py │ ├── assets │ └── configuration.json │ ├── models.py │ └── utils.py ├── requirements.txt ├── requirements └── docs.txt ├── resources ├── MSAgent-Bench.png ├── data_science_assistant_streamlit_1.png ├── data_science_assistant_streamlit_2.png ├── data_science_assistant_streamlit_3.png ├── data_science_assistant_streamlit_4.png ├── modelscope-agent.png ├── modelscopegpt_case_knowledge-qa.png ├── modelscopegpt_case_multi-modal.png ├── modelscopegpt_case_multi-turn.png ├── modelscopegpt_case_single-step.png └── modelscopegpt_case_video-generation.png ├── scripts ├── run_assistant_server.sh ├── run_tool_manager.sh └── run_tool_node.sh ├── setup.cfg ├── setup.py ├── test_single.py └── tests ├── __init__.py ├── agents ├── __init__.py ├── test_agent_builder.py ├── test_agent_with_api_tool.py ├── test_memory_with_file_knowledge.py └── test_memory_with_retrieval_knowledge.py ├── llms ├── __init__.py ├── test_llm.py └── test_vllm.py ├── samples ├── 34aca18b-17a1-4558-9064-22fdfcef7a94.wav ├── girl.png ├── luoli15.jpg ├── modelscope_qa_1.txt ├── modelscope_qa_2.txt ├── ms_intro.png ├── rag.png ├── rag2.png ├── rag3.jpg └── 常见QA.pdf ├── services ├── test_sandbox.py └── test_tool_manager.py ├── storage ├── test_file_storage.py └── test_vector_storage.py ├── test_agent.py ├── 
test_agent_env_util.py ├── test_agent_registry.py ├── test_callback.py ├── test_environment.py ├── test_rag.py ├── tools ├── __init__.py ├── test_code_interpreter.py ├── test_dashscope_asr_tts.py ├── test_hf_tool.py ├── test_image_enhancement.py ├── test_image_gen.py ├── test_image_gen_lite.py ├── test_langchain_tool.py ├── test_modelscope_tool.py ├── test_openapi_schema.py ├── test_qwen_vl_tool.py ├── test_rapid_api_tool.py ├── test_service_proxy.py ├── test_style_repaint.py ├── test_too_output_wrapper.py ├── test_weather.py ├── test_web_browsing.py ├── test_web_search.py └── test_wordart_tool.py ├── ut_utils.py ├── utils.py └── utils ├── __init__.py ├── test_git_clone.py └── test_token_count.py /.dev_scripts/build_docs.sh: -------------------------------------------------------------------------------- 1 | pip install -r requirements/docs.txt 2 | cd docs 3 | rm -rf build 4 | 5 | # update api rst 6 | #rm -rf source/api/ 7 | #sphinx-apidoc --module-first -o source/api/ ../modelscope/ 8 | make html 9 | -------------------------------------------------------------------------------- /.dev_scripts/dockerci.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # install dependencies for ci 4 | wget -O ffmpeg.tar.xz https://modelscope-agent.oss-cn-hangzhou.aliyuncs.com/resources/ffmpeg.tar.xz 5 | tar xvf ffmpeg.tar.xz 6 | 7 | export PATH=`pwd`/ffmpeg-git-20240629-amd64-static:$PATH 8 | 9 | sudo apt-get install libcurl4 openssl 10 | wget https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-ubuntu2004-7.0.11.tgz 11 | tar -zxvf mongodb-linux-x86_64-ubuntu2004-7.0.11.tgz 12 | export PATH=`pwd`/mongodb-linux-x86_64-ubuntu2004-7.0.11/bin:$PATH 13 | 14 | mkdir mongodb 15 | mongod --dbpath ./mongodb --logpath ./mongo.log --fork 16 | 17 | export PATH=`pwd`:$PATH 18 | pip install torch 19 | export CODE_INTERPRETER_WORK_DIR=${GITHUB_WORKSPACE} 20 | echo "${CODE_INTERPRETER_WORK_DIR}" 21 | 22 | # cp file 23 | cp tests/samples/* "${CODE_INTERPRETER_WORK_DIR}/" 24 | ls "${CODE_INTERPRETER_WORK_DIR}" 25 | pip install playwright 26 | playwright install --with-deps chromium 27 | 28 | # install package 29 | pip install fastapi pydantic uvicorn docker sqlmodel transformers ray 30 | pip install pymongo motor llama-index-storage-docstore-mongodb==0.1.3 llama-index-storage-index-store-mongodb==0.1.2 llama-index-readers-mongodb==0.1.7 31 | pip install tensorflow pyclipper shapely tf_slim 32 | pip install moviepy 33 | 34 | # run ci 35 | pytest tests 36 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: true 2 | contact_links: 3 | - name: Ask a question or get support 4 | url: https://github.com/modelscope/modelscope-agent/discussions/new?category=q-a 5 | about: Ask a question or request support for using Modelscope-Agent 6 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/doc-demo-bug.yaml: -------------------------------------------------------------------------------- 1 | name: Documentation 2 | title: "[] " 3 | description: Report an issue with the Modelscope-agent documentation 4 | labels: [docs, triage] 5 | body: 6 | - type: markdown 7 | attributes: 8 | value: Thank you for helping us improve the Modelscope-agent documentation and demo! 
9 | 10 | - type: textarea 11 | attributes: 12 | label: Description 13 | description: | 14 | Tell us about the change you'd like to see. For example, "I'd like to 15 | see more examples of how to create a new tool." 16 | validations: 17 | required: true 18 | 19 | - type: textarea 20 | attributes: 21 | label: Link 22 | description: | 23 | If the problem is related to an existing section, please add a link to 24 | the section. For example, https://github.com/modelscope/modelscope-agent/blob/master/demo/demo_alpha_umi.ipynb. 25 | validations: 26 | required: false 27 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature-requests.yaml: -------------------------------------------------------------------------------- 1 | name: Feature Request 2 | description: Suggest an idea for the Modelscope-Agent project 3 | title: "[] " 4 | labels: [enhancement] 5 | assignees: 6 | - zzhangpurdue 7 | - suluyana 8 | 9 | body: 10 | - type: markdown 11 | attributes: 12 | value: | 13 | Thank you for finding the time to propose a new feature! 14 | We really appreciate the community's efforts to improve Modelscope-Agent. 15 | 16 | - type: checkboxes 17 | id: searched 18 | attributes: 19 | label: Initial Checks 20 | description: | 21 | Just a few checks to make sure you need to create a feature request. 22 | 23 | _Sorry to sound so draconian 👿; but every second spent replying to issues is time not spent improving Modelscope-Agent 🙇._ 24 | options: 25 | - label: I have searched Google & GitHub for similar requests and couldn't find anything 26 | required: true 27 | - label: I have read and followed [the docs & demos](https://github.com/modelscope/modelscope-agent/tree/master/demo) and still think this feature is missing 28 | required: true 29 | 30 | - type: textarea 31 | attributes: 32 | label: Description 33 | description: A short description of your feature 34 | 35 | - type: textarea 36 | attributes: 37 | label: Use case 38 | description: > 39 | Describe the use case of your feature request. It will help us understand and 40 | prioritize the feature request. 41 | placeholder: > 42 | Rather than telling us how you might implement this feature, try to take a 43 | step back and describe what you are trying to achieve. 44 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | ## Change Summary 5 | 6 | 7 | 8 | ## Related issue number 9 | 10 | 11 | 12 | ## Checklist 13 | 14 | * [ ] The pull request title is a good summary of the changes - it will be used in the changelog 15 | * [ ] Unit tests for the changes exist 16 | * [ ] Run `pre-commit install` and `pre-commit run --all-files` before committing, and make sure the lint check passes.
17 | * [ ] Some cases need DASHSCOPE_TOKEN_API to pass the unit tests; I have at least **passed the unit tests locally** 18 | * [ ] Documentation reflects the changes where applicable 19 | * [ ] My PR is ready to review, **please add a comment including the phrase "please review" to assign reviewers** 20 | -------------------------------------------------------------------------------- /.github/workflows/lint.yaml: -------------------------------------------------------------------------------- 1 | name: Lint test 2 | 3 | on: [push, pull_request] 4 | 5 | concurrency: 6 | group: ${{ github.workflow }}-${{ github.ref }} 7 | cancel-in-progress: true 8 | 9 | jobs: 10 | lint: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - uses: actions/checkout@v2 14 | - name: Set up Python 3.8 15 | uses: actions/setup-python@v2 16 | with: 17 | python-version: 3.8 18 | - name: Install pre-commit hook 19 | run: | 20 | pip install pre-commit 21 | - name: Linting 22 | run: pre-commit run --all-files 23 | -------------------------------------------------------------------------------- /.github/workflows/publish.yaml: -------------------------------------------------------------------------------- 1 | name: release 2 | 3 | on: 4 | push: 5 | tags: 6 | - 'v**' 7 | 8 | concurrency: 9 | group: ${{ github.workflow }}-${{ github.ref }}-publish 10 | cancel-in-progress: true 11 | 12 | jobs: 13 | build-n-publish: 14 | runs-on: ubuntu-22.04 15 | #if: startsWith(github.event.ref, 'refs/tags') 16 | steps: 17 | - uses: actions/checkout@v2 18 | - name: Set up Python 3.10 19 | uses: actions/setup-python@v2 20 | with: 21 | python-version: '3.10' 22 | - name: Install wheel 23 | run: pip install wheel 24 | - name: Build ModelScope-Agent 25 | run: python setup.py sdist bdist_wheel 26 | - name: Publish package to PyPI 27 | run: | 28 | pip install twine 29 | twine upload dist/* --skip-existing -u __token__ -p ${{ secrets.PYPI_API_TOKEN }} 30 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/pycqa/flake8.git 3 | rev: 4.0.0 4 | hooks: 5 | - id: flake8 6 | exclude: | 7 | (?x)^( 8 | demo/ 9 | )$ 10 | - repo: https://github.com/PyCQA/isort.git 11 | rev: 4.3.21 12 | hooks: 13 | - id: isort 14 | exclude: | 15 | (?x)^( 16 | demo/ 17 | )$ 18 | - repo: https://github.com/pre-commit/mirrors-yapf.git 19 | rev: v0.30.0 20 | hooks: 21 | - id: yapf 22 | exclude: | 23 | (?x)^( 24 | demo/ 25 | )$ 26 | - repo: https://github.com/pre-commit/pre-commit-hooks.git 27 | rev: v3.1.0 28 | hooks: 29 | - id: trailing-whitespace 30 | exclude: demo/ 31 | - id: check-yaml 32 | exclude: demo/ 33 | - id: end-of-file-fixer 34 | exclude: demo/ 35 | - id: requirements-txt-fixer 36 | exclude: demo/ 37 | - id: double-quote-string-fixer 38 | exclude: demo/ 39 | - id: check-merge-conflict 40 | exclude: demo/ 41 | - id: fix-encoding-pragma 42 | exclude: demo/ 43 | args: ["--remove"] 44 | - id: mixed-line-ending 45 | exclude: demo/ 46 | args: ["--fix=lf"] 47 | -------------------------------------------------------------------------------- /.pre-commit-config_local.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: /home/admin/pre-commit/flake8 3 | rev: 4.0.0 4 | hooks: 5 | - id: flake8 6 | exclude: | 7 | (?x)^( 8 | demo/ 9 | )$ 10 | - repo: /home/admin/pre-commit/isort 11 | rev: 4.3.21 12 | hooks: 13 | - id: isort 14 | exclude: | 15 |
(?x)^( 16 | demo/ 17 | )$ 18 | - repo: /home/admin/pre-commit/mirrors-yapf 19 | rev: v0.30.0 20 | hooks: 21 | - id: yapf 22 | exclude: | 23 | (?x)^( 24 | demo/ 25 | )$ 26 | - repo: /home/admin/pre-commit/pre-commit-hooks 27 | rev: v3.1.0 28 | hooks: 29 | - id: trailing-whitespace 30 | exclude: demo/ 31 | - id: check-yaml 32 | exclude: demo/ 33 | - id: end-of-file-fixer 34 | exclude: demo/ 35 | - id: requirements-txt-fixer 36 | exclude: demo/ 37 | - id: double-quote-string-fixer 38 | exclude: demo/ 39 | - id: check-merge-conflict 40 | exclude: demo/ 41 | - id: fix-encoding-pragma 42 | exclude: demo/ 43 | args: ["--remove"] 44 | - id: mixed-line-ending 45 | exclude: demo/ 46 | args: ["--fix=lf"] 47 | -------------------------------------------------------------------------------- /.streamlit/config.toml: -------------------------------------------------------------------------------- 1 | # .streamlit/config.toml 2 | 3 | [global] 4 | # Set the PYTHONPATH to include the project root directory 5 | pythonpath = "${PYTHONPATH}" 6 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | recursive-include modelscope_agent/tools/code_interpreter *.ttf 2 | recursive-include modelscope_agent/utils *.tiktoken 3 | recursive-include modelscope_agent/utils/nltk *.zip 4 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | WHL_BUILD_DIR :=package 2 | DOC_BUILD_DIR :=docs/build/ 3 | 4 | # default rule 5 | default: whl docs 6 | 7 | .PHONY: docs 8 | docs: 9 | bash .dev_scripts/build_docs.sh 10 | 11 | .PHONY: whl 12 | whl: 13 | python setup.py sdist bdist_wheel 14 | 15 | .PHONY: clean 16 | clean: 17 | rm -rf $(WHL_BUILD_DIR) $(DOC_BUILD_DIR) 18 | -------------------------------------------------------------------------------- /apps/agentfabric/README_CN.md: -------------------------------------------------------------------------------- 1 | 2 |

# Modelscope AgentFabric: An Open and Customizable AI Agent Building Framework
9 | 10 | ## Introduction 11 | 12 | **Modelscope AgentFabric** is an interactive agent framework for conveniently creating agents tailored to a wide range of real-world applications. AgentFabric is built around pluggable and customizable LLMs, and strengthens their capabilities for instruction following, retrieving extra knowledge, and using external tools. The interactive interfaces provided by AgentFabric include: 13 | - **⚡ Agent Builder**: an automatic instruction and tool provider that customizes the user's agent by chatting with the user 14 | - **⚡ User Agent**: an agent customized for the user's real-world application, equipped with the instructions, extra knowledge, and tools provided by the builder or entered by the user 15 | - **⚡ Configuration Tool**: lets users customize the user agent's configuration and preview the user agent's behavior in real time 16 | 17 | 🔗 We currently build various agent applications on AgentFabric around the [Qwen2.0 LLM API](https://help.aliyun.com/zh/dashscope/developer-reference/api-details) provided by DashScope. We are also actively exploring ways to bring in other LLMs with strong foundational capabilities, via APIs or native ModelScope models, to build a rich variety of agents. 18 | 19 | ## Installation 20 | 21 | Clone the repository and install the dependencies: 22 | 23 | ```bash 24 | git clone https://github.com/modelscope/modelscope-agent.git 25 | cd modelscope-agent && pip install -r requirements.txt && pip install -r apps/agentfabric/requirements.txt 26 | ``` 27 | 28 | ## Prerequisites 29 | 30 | - Python 3.10 31 | - An API key for the Qwen 2.0 models, which can be activated and obtained for free from [DashScope](https://help.aliyun.com/zh/dashscope/developer-reference/activate-dashscope-and-create-an-api-key). 32 | 33 | ## Usage 34 | 35 | ```bash 36 | export PYTHONPATH=$PYTHONPATH:/path/to/your/modelscope-agent 37 | export DASHSCOPE_API_KEY=your_api_key 38 | cd modelscope-agent/apps/agentfabric 39 | python app.py 40 | ``` 41 | 42 | ## 🚀 Roadmap 43 | - [x] Support building agents via manual configuration 44 | - [x] Build agents through conversations with the LLM 45 | - [x] Support running on ModelScope Studio [link](https://modelscope.cn/studios/wenmengzhou/AgentFabric/summary) [PR #98](https://github.com/modelscope/modelscope-agent/pull/98) 46 | - [x] Improve knowledge-base retrieval quality [PR #105](https://github.com/modelscope/modelscope-agent/pull/105) [PR #107](https://github.com/modelscope/modelscope-agent/pull/107) [PR #109](https://github.com/modelscope/modelscope-agent/pull/109) 47 | - [x] Support publishing and sharing agents 48 | - [ ] Support various other LLM APIs and ModelScope models 49 | - [ ] Handle long text inputs into memory 50 | - [ ] Production-level support: logging and profiling 51 | - [ ] Support agent fine-tuning 52 | - [ ] Evaluate agent performance in different scenarios 53 | -------------------------------------------------------------------------------- /apps/agentfabric/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/apps/agentfabric/__init__.py -------------------------------------------------------------------------------- /apps/agentfabric/assets/bot.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/apps/agentfabric/assets/bot.jpg -------------------------------------------------------------------------------- /apps/agentfabric/assets/user.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/apps/agentfabric/assets/user.jpg -------------------------------------------------------------------------------- /apps/agentfabric/config/builder_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "", 3 | "avatar": "custom_bot_avatar.png", 4 | "description": "", 5 | "instruction": "", 6 | "language": "zh", 7 | "prompt_recommend": [ 8 | "你可以做什么?", 9 | "你有什么功能?", 10 | "如何使用你的功能?", 11 | "能否给我一些示例指令?"
12 | ], 13 | "knowledge": [], 14 | "tools": { 15 | "image_gen": { 16 | "name": "Wanx Image Generation", 17 | "is_active": true, 18 | "use": false 19 | }, 20 | "code_interpreter": { 21 | "name": "Code Interpreter", 22 | "is_active": true, 23 | "use": false 24 | } 25 | }, 26 | "model": "qwen-max" 27 | } 28 | -------------------------------------------------------------------------------- /apps/agentfabric/config/builder_config_ci.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "Python数据分析师", 3 | "avatar": "image.png", 4 | "description": "使用python解决任务时,你可以运行代码并得到结果,如果运行结果有错误,你需要尽可能对代码进行改进。你可以处理用户上传到电脑的文件。", 5 | "instruction": "1. 你会数学解题;\n2. 你会数据分析和可视化;\n3. 用户上传文件时,你必须先了解文件结构再进行下一步操作;如果没有上传文件但要求画图,则编造示例数据画图\n4. 调用工具前你需要说明理由;Think step by step\n5. 代码出错时你需要反思并改进", 6 | "prompt_recommend": [ 7 | "制作示例饼图来报告某网站流量来源。", 8 | "鸡兔同笼 32头 88腿 多少兔", 9 | "帮我把这个链接“https://modelscope.cn/my/overview”网址,转成二维码,并展示图片", 10 | "一支钢笔5元,一支铅笔3元,一个文具盒10元,一套文具包括2支钢笔,3支铅笔,1个文具盒,一共多少钱?" 11 | ], 12 | "knowledge": [], 13 | "tools": { 14 | "image_gen": { 15 | "name": "Wanx Image Generation", 16 | "is_active": true, 17 | "use": false 18 | }, 19 | "code_interpreter": { 20 | "name": "Code Interpreter", 21 | "is_active": true, 22 | "use": true 23 | }, 24 | "amap_weather": { 25 | "name": "高德天气", 26 | "is_active": true, 27 | "use": false 28 | } 29 | }, 30 | "model": "qwen-max-1201" 31 | } 32 | -------------------------------------------------------------------------------- /apps/agentfabric/config/builder_config_template.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "AI-Agent", 3 | "avatar": "logo.png", 4 | "description": "我希望AI-Agent能够像多啦A梦一样,拥有各种神奇的技能和能力,可以帮我解决生活中的各种问题。", 5 | "instruction": "请告诉我你想要什么帮助,我会尽力提供解决方案。;如果你有任何问题,请随时向我提问,我会尽我所能回答你的问题。;我可以帮你查找信息、提供建议、提醒日程等,只需要你告诉我你需要什么。", 6 | "prompt_recommend": [ 7 | "你好,我是AI-Agent,有什么可以帮助你的吗?", 8 | "嗨,很高兴见到你,我是AI-Agent,你可以问我任何问题。", 9 | "你好,我是AI-Agent,需要我帮你做些什么吗?", 10 | "嗨,我是AI-Agent,有什么我可以帮到你的吗?" 
11 | ], 12 | "knowledge": [], 13 | "tools": { 14 | "image_gen": { 15 | "name": "Wanx Image Generation", 16 | "is_active": true, 17 | "use": false 18 | }, 19 | "code_interpreter": { 20 | "name": "Code Interpreter", 21 | "is_active": true, 22 | "use": false 23 | } 24 | }, 25 | "model": "qwen-max-1201" 26 | } 27 | -------------------------------------------------------------------------------- /apps/agentfabric/config/builder_config_wuxia.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "武侠小说家", 3 | "avatar": "custom_bot_avatar.png", 4 | "description": "能够生成武侠小说并配图", 5 | "instruction": "你的指令是为我提供一个基于金庸武侠小说世界的在线RPG游戏体验。在这个游戏中，玩家将扮演金庸故事中的一个关键角色，游戏情景将基于他的小说。这个游戏的玩法是互动式的，并遵循以下特定格式：\n\n<场景描述>：根据玩家的选择，故事情节将按照金庸小说的线索发展。你将描述角色所处的环境和情况。\n\n<场景图片>：对于每个场景，你将创造一个概括该情况的图像。这些图像的风格将类似于1980年代RPG游戏，大小是16:9宽屏比例。在这个步骤你需要调用画图工具，绘制<场景描述>。\n\n<选择>：在每次互动中，你将为玩家提供三个行动选项，分别标为A、B、C，以及第四个选项“D: 输入玩家的选择”。故事情节将根据玩家选择的行动进展。如果一个选择不是直接来自小说，你将创造性地适应故事，最终引导它回归原始情节。\n\n整个故事将围绕金庸小说中丰富而复杂的世界展开。每次互动必须包括<场景描述>、<场景图片>和<选择>。所有内容将以繁体中文呈现。你的重点将仅仅放在提供场景描述，场景图片和选择上，不包含其他游戏指导。场景尽量不要重复，要丰富一些。", 6 | "prompt_recommend": [ 7 | "扮演小龙女", 8 | "扮演杨过" 9 | ], 10 | "knowledge": [], 11 | "tools": { 12 | "image_gen": { 13 | "name": "Wanx Image Generation", 14 | "is_active": true, 15 | "use": true 16 | }, 17 | "code_interpreter": { 18 | "name": "Code Interpreter", 19 | "is_active": true, 20 | "use": false 21 | } 22 | }, 23 | "model": "qwen-max-1201" 24 | } 25 | -------------------------------------------------------------------------------- /apps/agentfabric/config/custom_bot_avatar.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/apps/agentfabric/config/custom_bot_avatar.png -------------------------------------------------------------------------------- /apps/agentfabric/requirements.txt: -------------------------------------------------------------------------------- 1 | gradio==4.36.1 2 | markdown-cjk-spacing 3 | mdx_truly_sane_lists 4 | modelscope-agent>=0.6.6 5 | modelscope_studio>=0.4.0 6 | moviepy 7 | pymdown-extensions 8 | python-slugify 9 | unstructured 10 | -------------------------------------------------------------------------------- /apps/agentfabric/response.json: -------------------------------------------------------------------------------- 1 | {"status_code": 500, "request_id": "0e8e65da-ee20-9c49-920a-94ca1df6ec09", "code": "InternalError.Algo", "message": "InternalError.Algo", "output": null, "usage": null} 2 | -------------------------------------------------------------------------------- /apps/agentfabric/server_logging.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | from contextvars import ContextVar 4 | 5 | request_id_var = ContextVar('request_id', default='') 6 | 7 | 8 | # Log filter that attaches the request_id (and host IP) to every log record 9 | class RequestIDLogFilter(logging.Filter): 10 | 11 | def filter(self, record): 12 | record.request_id = request_id_var.get('') 13 | record.ip_addr = os.getenv('ALIYUN_ECI_ETH0_IP', '') 14 | return True 15 | 16 | 17 | # Configure the log format 18 | formatter = logging.Formatter( 19 | '[%(asctime)s] [%(request_id)s] [%(filename)s:%(lineno)d] [%(ip_addr)s] SERVER_LOG_%(levelname)s: %(message)s' 20 | ) 21 | 22 | logger = logging.getLogger('my_custom_logger') 23 | logger.setLevel(logging.INFO) 24 | 25 | file_handler = logging.FileHandler('info.log') 26 |
file_handler.setLevel(logging.INFO) 27 | file_handler.addFilter(RequestIDLogFilter()) 28 | file_handler.setFormatter(formatter) 29 | 30 | console_handler = logging.StreamHandler() 31 | console_handler.setLevel(logging.INFO) 32 | console_handler.addFilter(RequestIDLogFilter()) 33 | console_handler.setFormatter(formatter) 34 | 35 | logger.addHandler(file_handler) 36 | logger.addHandler(console_handler) 37 | -------------------------------------------------------------------------------- /apps/agentfabric/version.py: -------------------------------------------------------------------------------- 1 | __version__ = '0.3.0rc0' 2 | -------------------------------------------------------------------------------- /apps/codexgraph_agent/build_requirements.txt: -------------------------------------------------------------------------------- 1 | fasteners 2 | jedi==0.17.2 3 | parso==0.7.0 4 | py2neo 5 | -------------------------------------------------------------------------------- /apps/codexgraph_agent/codexgraph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/apps/codexgraph_agent/codexgraph.png -------------------------------------------------------------------------------- /apps/codexgraph_agent/img/README/code_chat2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/apps/codexgraph_agent/img/README/code_chat2.png -------------------------------------------------------------------------------- /apps/codexgraph_agent/img/README/code_commentor1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/apps/codexgraph_agent/img/README/code_commentor1.png -------------------------------------------------------------------------------- /apps/codexgraph_agent/img/README/code_commentor2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/apps/codexgraph_agent/img/README/code_commentor2.png -------------------------------------------------------------------------------- /apps/codexgraph_agent/img/README/code_debug_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/apps/codexgraph_agent/img/README/code_debug_0.png -------------------------------------------------------------------------------- /apps/codexgraph_agent/img/README/code_debug_7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/apps/codexgraph_agent/img/README/code_debug_7.png -------------------------------------------------------------------------------- /apps/codexgraph_agent/img/README/code_debug_8.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/apps/codexgraph_agent/img/README/code_debug_8.png -------------------------------------------------------------------------------- 
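The server_logging module listed earlier wires a ContextVar-based request ID and the host IP into every log record via RequestIDLogFilter. Below is a minimal, purely illustrative usage sketch (not repository code — the handler name and payload shape are assumptions):

```python
import uuid

from server_logging import logger, request_id_var


def handle_request(payload: dict) -> dict:
    # Bind a fresh request id to the current context; RequestIDLogFilter
    # reads request_id_var on every log call, so all records emitted while
    # this request is being handled carry the same id.
    token = request_id_var.set(uuid.uuid4().hex)
    try:
        logger.info('handling payload with %d keys', len(payload))
        return {'ok': True}
    finally:
        # Restore the previous context value once the request is done.
        request_id_var.reset(token)
```

Because the binding lives in a ContextVar rather than a module-level global, concurrent requests (threads or asyncio tasks) each see their own request id in the log output.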
/apps/codexgraph_agent/img/README/code_debug_9.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/apps/codexgraph_agent/img/README/code_debug_9.png -------------------------------------------------------------------------------- /apps/codexgraph_agent/img/README/code_gen0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/apps/codexgraph_agent/img/README/code_gen0.png -------------------------------------------------------------------------------- /apps/codexgraph_agent/img/README/code_gen1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/apps/codexgraph_agent/img/README/code_gen1.png -------------------------------------------------------------------------------- /apps/codexgraph_agent/img/README/code_unittester.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/apps/codexgraph_agent/img/README/code_unittester.png -------------------------------------------------------------------------------- /apps/codexgraph_agent/img/framework.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/apps/codexgraph_agent/img/framework.png -------------------------------------------------------------------------------- /apps/codexgraph_agent/pages/components/states.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import json 4 | import streamlit as st 5 | 6 | CONFIG_FILE = 'apps/codexgraph_agent/setting.json' 7 | 8 | 9 | def load_config(): 10 | with open(CONFIG_FILE, 'r') as f: 11 | return json.load(f) 12 | 13 | 14 | def save_config(config): 15 | with open(CONFIG_FILE, 'w') as f: 16 | json.dump(config, f, indent=4) 17 | 18 | 19 | # Load configuration 20 | config = load_config() 21 | 22 | 23 | def initialize_page_state(page_name): 24 | if 'shared' not in st.session_state: 25 | st.session_state.shared = config 26 | 27 | if page_name not in st.session_state: 28 | st.session_state[page_name] = { 29 | 'conversation': [], 30 | 'conversation_history': [], 31 | 'chat': [], 32 | 'final_result': '', 33 | 'input_text': '', 34 | 'input_file_path': '', 35 | 'error_place': None, 36 | 'reload_button': None, 37 | 'test_connect_button': None, 38 | 'build_button': None, 39 | 'test_connect_place': None, 40 | 'build_place': None, 41 | 'progress_bar': None, 42 | 'conversation_container': None, 43 | 'conversation_container_chat': None, 44 | 'setting': { 45 | 'history_path': '', 46 | 'history_list': [], 47 | }, 48 | } 49 | 50 | 51 | def get_json_files(path): 52 | try: 53 | return [ 54 | os.path.join(path, f) for f in os.listdir(path) 55 | if f.endswith('.json') 56 | ] 57 | except FileNotFoundError: 58 | return [] 59 | -------------------------------------------------------------------------------- /apps/codexgraph_agent/prompt/code_chat/python/generate_prompt.txt: -------------------------------------------------------------------------------- 1 | ${message} Please Answer Question: 2 | 3 | ### 
User's Requirements: 4 | <questions> 5 | ${user_query} 6 | </questions> 7 | 8 | #### Final Output Format: 9 | 10 | ... 11 | 12 | ... 13 | 14 | {{reference of source code 1}} 15 | {{reference of source code 2}} 16 | ... 17 | -------------------------------------------------------------------------------- /apps/codexgraph_agent/prompt/code_chat/python/system_prompt_cypher.txt: -------------------------------------------------------------------------------- 1 | # ROLE # 2 | You are a Cypher code assistant proficient in querying graph databases. Your task is to write Cypher queries based on the queries provided by the code assistant specializing in cross-file code tasks. Your goal is to extract the relevant information from the code graph database to support answering the user's questions about the code. 3 | 4 | # LIMITATIONS # 5 | 1. You cannot modify or add to the schema of the code graph database. 6 | 2. You must rely on the problem statements and constraints given by the cross-file code completion assistant. 7 | 8 | # CODE GRAPH DATABASE # 9 | The code graph database is derived from static parsing of the project. You will write Cypher queries to extract necessary information based on the text queries provided. The database is presumed error-free. If unexpected responses arise, it might be due to incorrect queries, missing nodes, or edges from indirect calls, dynamic behaviors, and complex control flows. 10 | 11 | # SCHEMA OF THE CODE GRAPH DATABASE # 12 | {{python_db_schema}} 13 | -------------------------------------------------------------------------------- /apps/codexgraph_agent/prompt/code_chat/python/system_prompt_primary.txt: -------------------------------------------------------------------------------- 1 | # ROLE # 2 | You are a software developer maintaining a large project. 3 | Your task is to answer the various questions users raise about the code project, which may include asking questions, fixing bugs, adding function comments, adding new requirements, etc. 4 | 5 | The question contains a description marked between <questions> and </questions>. 6 | You can write text queries to retrieve information from the given code graph database, and then write answers to the user's questions. 7 | 8 | # LIMITATIONS # 9 | 1. You can only process text content, including code; 10 | 2. You cannot interpret graphical or visual content; 11 | 3. You have no access to the original project code, only to the information stored in the code graph database; 12 | 13 | # CODE GRAPH DATABASE # 14 | The code graph database is derived from static parsing of the project. Another code assistant, proficient in Cypher and graph databases, \ 15 | will translate your text queries into Cypher queries to extract the needed information based on your problem statement. \ 16 | The database is assumed to be devoid of issues. If unexpected responses occur during querying, it might be due to a faulty query, \ 17 | or missing nodes or edges resulting from indirect calls, dynamic behaviors, and complex control flows. 18 | 19 | # SCHEMA OF THE CODE GRAPH DATABASE # 20 | {{python_db_schema}} 21 | 22 | # Notes for code generation: 23 | 1. Any non-existent/not-found method/field/function is not allowed. 24 | 2. No assumptions are allowed.
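The CodexGraph prompts above all follow the same two-agent pattern: a primary agent writes plain-text queries, and a Cypher agent translates them into Cypher against the code graph database. As a purely illustrative sketch (not repository code), the snippet below shows how such a generated query could be executed with py2neo, which the app's build_requirements.txt pulls in; the connection URI, credentials, and matched names are assumptions:

```python
from py2neo import Graph

# Hypothetical connection details; adjust to your own Neo4j setup.
graph = Graph('bolt://localhost:7687', auth=('neo4j', 'password'))

# A query in the style the prompts recommend: fuzzy-match a MODULE node,
# then follow CONTAINS edges to the functions it defines.
query = """
MATCH (m:MODULE)
WHERE m.name =~ '.*server_logging'
MATCH (m)-[:CONTAINS]->(f:FUNCTION)
RETURN m.name AS module, f.name AS function
"""

for record in graph.run(query):
    print(record['module'], record['function'])
```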
25 | -------------------------------------------------------------------------------- /apps/codexgraph_agent/prompt/code_commenter/python/generate_prompt.txt: -------------------------------------------------------------------------------- 1 | ${message} Please add detailed comments to the code: 2 | 3 | ### Code to be Annotated: 4 | ```python 5 | ${file_path} 6 | ${user_query} 7 | ``` 8 | 9 | #### Final Output Format: 10 | You will format your final output as follows: 11 | ```python 12 | {code_with_annotations} 13 | ``` 14 | 15 | #### Detailed Comments Notes: 16 | The comments should include: 17 | 1. Function Description: A brief description of the function's purpose and functionality. 18 | 2. Parameters: A list of the function's parameters, including their descriptions (omit types if they are unknown). If there are no parameters, state "Parameters: None". 19 | 3. Returns: A description of the function's return value, including its type (optional) and meaning (omit types if they are unknown). If there is no return value, state "Returns: None". 20 | 4. Exceptions (optional): A list of exceptions the function might raise, including the conditions under which they are raised. 21 | 22 | Example of detailed comments: 23 | ```python 24 | def example_function(param1: int, param2): 25 | """ 26 | This function performs a specific operation on two parameters and returns the result. 27 | 28 | Parameters: 29 | param1 (int): The first parameter, which is an integer. 30 | param2: The second parameter. 31 | 32 | Returns: 33 | bool: True if the operation is successful, False otherwise. 34 | 35 | Raises: 36 | ValueError: If param1 is negative. 37 | TypeError: If param2 is not a string. 38 | """ 39 | ``` 40 | -------------------------------------------------------------------------------- /apps/codexgraph_agent/prompt/code_commenter/python/start_prompt_cypher.txt: -------------------------------------------------------------------------------- 1 | ### Text Queries: 2 | ${text_queries} 3 | 4 | ### Task Instructions: 5 | Your task is to decompose the given text queries into several simple ones and try to use precise or fuzzy matching, and then translate the decomposed text queries into the corresponding Cypher queries. Your answer should follow the formats below: 6 | 7 | [start_of_cypher_queries] 8 | ### Query 1 9 | **decomposed text query**: 10 | ```cypher 11 | 12 | ``` 13 | 14 | ### Query 2 15 | **decomposed text query**: 16 | ```cypher 17 | 18 | ``` 19 | ... 20 | 21 | ### Query n 22 | **decomposed text query**: 23 | ```cypher 24 | 25 | ``` 26 | [end_of_cypher_queries] 27 | 28 | Here are some useful tips: 29 | 1. Try adding exception handling logic (e.g., `OPTIONAL`, `OR`) to return error information or handle edge cases in Cypher queries. 30 | 2. Use appropriate Cypher patterns and aggregation functions to handle exceptions and edge cases that may occur in queries. 31 | 3. Try to use the nodes (CLASS, METHOD, FUNCTION, FIELD, GLOBAL_VARIABLE, MODULE) and edges (CONTAINS, HAS_METHOD, HAS_FIELD, USES, INHERITS) in the schema to rewrite text queries instead of using ambiguous expressions like `attributes` or `objects`. 32 | 4. Use fuzzy matching when retrieving nodes to avoid issues with absolute paths.
For example, use a Cypher pattern such as `WHERE m.name =~ '.*<node_name>'` 33 | 34 | Example: 35 | When you try to find MODULE `a` and FUNCTION `b`, first try precise matching: 36 | ``` 37 | MATCH (m:MODULE) 38 | WHERE m.name = 'a' 39 | MATCH (m)-[:CONTAINS]->(f:FUNCTION) 40 | WHERE f.name = 'b' 41 | RETURN m, f 42 | ``` 43 | and then try fuzzy matching: 44 | ``` 45 | MATCH (m:MODULE) 46 | WHERE m.name =~ '.*a' 47 | MATCH (m)-[:CONTAINS]->(f:FUNCTION) 48 | WHERE f.name =~ '.*b' 49 | RETURN m, f 50 | ``` 51 | -------------------------------------------------------------------------------- /apps/codexgraph_agent/prompt/code_commenter/python/system_prompt_cypher.txt: -------------------------------------------------------------------------------- 1 | # ROLE # 2 | You are a Cypher code assistant proficient in querying graph databases. Your task is to write Cypher queries based on the queries provided by the code assistant specializing in cross-file code completion. Your goal is to extract the relevant information from the code graph database to support the completion of unfinished code. 3 | 4 | # LIMITATIONS # 5 | 1. You cannot modify or add to the schema of the code graph database. 6 | 2. You must rely on the problem statements and constraints given by the cross-file code completion assistant. 7 | 8 | # CODE GRAPH DATABASE # 9 | The code graph database is derived from static parsing of the project. You will write Cypher queries to extract necessary information based on the text queries provided. The database is presumed error-free. If unexpected responses arise, it might be due to incorrect queries, missing nodes, or edges from indirect calls, dynamic behaviors, and complex control flows. 10 | 11 | # SCHEMA OF THE CODE GRAPH DATABASE # 12 | {{python_db_schema}} 13 | -------------------------------------------------------------------------------- /apps/codexgraph_agent/prompt/code_commenter/python/system_prompt_primary.txt: -------------------------------------------------------------------------------- 1 | # ROLE # 2 | You are a code assistant specializing in adding meaningful, standardized cross-file comments to functions. Your task is to analyze the provided code to be annotated and comment it according to the specified instructions. You may write text queries to retrieve information from the given code graph database. 3 | 4 | # LIMITATIONS # 5 | 1. You can only process text content, including code; 6 | 2. You cannot interpret graphical or visual content; 7 | 3. You have no access to the original project code, only to the information stored in the code graph database; 8 | 9 | # CODE GRAPH DATABASE # 10 | The code graph database is derived from static parsing of the project. Another code assistant, proficient in Cypher and graph databases, will translate your text queries into Cypher queries to extract the needed information based on your problem statement. The database is assumed to be devoid of issues. If unexpected responses occur during querying, it might be due to a faulty query, or missing nodes or edges resulting from indirect calls, dynamic behaviors, and complex control flows. 11 | 12 | # SCHEMA OF THE CODE GRAPH DATABASE # 13 | {{python_db_schema}} 14 | 15 | # REQUIREMENTS # 16 | 1. Since the Cypher code assistant can only translate your text queries into corresponding Cypher queries and does not have information about the given code files, please ensure that your text queries are very clear and include explicit input parameters. 17 | 2.
Since your task involves cross-file code completion, it is crucial to pay attention to the module references in the import section. When writing queries, make sure to specify the module's full name. For example, for the statement `from x.y import z`, emphasize that the full module name should be 'x.y.z' rather than just 'z'. 18 | -------------------------------------------------------------------------------- /apps/codexgraph_agent/prompt/code_debugger/python/generate_prompt.txt: -------------------------------------------------------------------------------- 1 | Write a patch for the issue, based on the retrieved context. 2 | You can import necessary libraries. 3 | Return the patch in the format below: 4 | You can write multiple modifications if needed. 5 | ``` 6 | # modification 1 7 | <file>...</file> 8 | <original>...</original> 9 | <patched>...</patched> 10 | 11 | # modification 2 12 | <file>...</file> 13 | <original>...</original> 14 | <patched>...</patched> 15 | 16 | # modification 3 17 | ... 18 | ``` 19 | Within `<file>`, replace `...` with the actual file path. 20 | 21 | Within `<original>`, replace `...` with the original code snippet from the program. 22 | 23 | Within `<patched>`, replace `...` with the fixed version of the original code. When adding original code and updated code, pay attention to indentation, as the code is in Python. 24 | -------------------------------------------------------------------------------- /apps/codexgraph_agent/prompt/code_debugger/python/start_prompt_cypher_buggy_loc.txt: -------------------------------------------------------------------------------- 1 | ### Text Queries: 2 | ${text_queries} 3 | 4 | ### Task Instructions: 5 | Your task is to decompose the given text queries into several simple ones and try to use precise or fuzzy matching, and then translate the decomposed text queries into the corresponding Cypher queries. Your answer should follow the formats below: 6 | 7 | [start_of_cypher_queries] 8 | ### Query 1 9 | **decomposed text query**: 10 | ```cypher 11 | 12 | ``` 13 | 14 | ### Query 2 15 | **decomposed text query**: 16 | ```cypher 17 | 18 | ``` 19 | ... 20 | 21 | ### Query n 22 | **decomposed text query**: 23 | ```cypher 24 | 25 | ``` 26 | [end_of_cypher_queries] 27 | 28 | NOTE: 29 | - If the attributes to be returned by the query are not specified, please return the entire node. 30 | - The logic to query whether a code snippet of a particular node contains a specific string has been temporarily disabled. 31 | -------------------------------------------------------------------------------- /apps/codexgraph_agent/prompt/code_debugger/python/system_prompt_cypher.txt: -------------------------------------------------------------------------------- 1 | # ROLE # 2 | You are a Cypher code assistant proficient in querying graph databases. Your task is to write Cypher queries based on the queries provided by the code assistant specializing in cross-file code tasks. Your goal is to extract the relevant information from the code graph database to support locating and fixing the buggy code. 3 | 4 | # LIMITATIONS # 5 | 1. You cannot modify or add to the schema of the code graph database. 6 | 2. You must rely on the problem statements and constraints given by the cross-file code completion assistant. 7 | 8 | # CODE GRAPH DATABASE # 9 | The code graph database is derived from static parsing of the project. You will write Cypher queries to extract necessary information based on the text queries provided. The database is presumed error-free.
If unexpected responses arise, it might be due to incorrect queries, missing nodes, or edges from indirect calls, dynamic behaviors, and complex control flows. 10 | 11 | # SCHEMA OF THE CODE GRAPH DATABASE # 12 | {{python_db_schema}} 13 | -------------------------------------------------------------------------------- /apps/codexgraph_agent/prompt/code_debugger/python/system_prompt_primary.txt: -------------------------------------------------------------------------------- 1 | # ROLE # 2 | You are a software developer maintaining a large project. 3 | You are working on an issue submitted to your project. 4 | The issue contains a description marked between <issue> and </issue>. 5 | You may write text queries to retrieve information from the given code graph database to gather information about the bug, and then write patches to solve the issue. 6 | 7 | # LIMITATIONS # 8 | 1. You can only process text content, including code; 9 | 2. You cannot interpret graphical or visual content; 10 | 3. You have no access to the original project code, only to the information stored in the code graph database; 11 | 12 | # CODE GRAPH DATABASE # 13 | The code graph database is derived from static parsing of the project. Another code assistant, proficient in Cypher and graph databases, \ 14 | will translate your text queries into Cypher queries to extract the needed information based on your problem statement. \ 15 | The database is assumed to be devoid of issues. If unexpected responses occur during querying, it might be due to a faulty query, \ 16 | or missing nodes or edges resulting from indirect calls, dynamic behaviors, and complex control flows. 17 | 18 | # SCHEMA OF THE CODE GRAPH DATABASE # 19 | {{python_db_schema}} 20 | 21 | # Notes for code generation: 22 | 1. Any non-existent/not-found method/field/function is not allowed. 23 | 2. No assumptions are allowed. 24 | -------------------------------------------------------------------------------- /apps/codexgraph_agent/prompt/code_generator/python/generate_prompt.txt: -------------------------------------------------------------------------------- 1 | ${message} Please generate new code: 2 | 3 | ### User's Requirements: 4 | ```text 5 | ${file_path} 6 | ${user_query} 7 | ``` 8 | 9 | #### Final Output Format: 10 | You only need to return the newly added code, not the original code. 11 | You will format your final output as follows: 12 | ```python 13 | {{new_code}} 14 | ``` 15 | 16 | ### Example of adding new code: 17 | - Requirements 18 | ```python 19 | class MathOperations: 20 | def add(self, a, b): 21 | return a + b 22 | TODO: add a new function named divide 23 | ``` 24 | - Expected Final Output: (only the newly added code needs to be returned) 25 | ```python 26 | def divide(self, a, b): 27 | if b == 0: 28 | raise ValueError("The divisor 'b' cannot be zero.") 29 | return a / b 30 | ``` 31 | -------------------------------------------------------------------------------- /apps/codexgraph_agent/prompt/code_generator/python/system_prompt_cypher.txt: -------------------------------------------------------------------------------- 1 | # ROLE # 2 | You are a Cypher code assistant proficient in querying graph databases. Your task is to write Cypher queries based on the queries provided by the code assistant specializing in cross-file code tasks. Your goal is to extract the relevant information from the code graph database to support adding the newly required code. 3 | 4 | # LIMITATIONS # 5 | 1. You cannot modify or add to the schema of the code graph database. 6 | 2.
You must rely on the problem statements and constraints given by the cross-file code completion assistant. 7 | 8 | # CODE GRAPH DATABASE # 9 | The code graph database is derived from static parsing of the project. You will write Cypher queries to extract necessary information based on the text queries provided. The database is presumed error-free. If unexpected responses arise, it might be due to incorrect queries, or missing nodes or edges resulting from indirect calls, dynamic behaviors, and complex control flows. 10 | 11 | # SCHEMA OF THE CODE GRAPH DATABASE # 12 | {{python_db_schema}} 13 | -------------------------------------------------------------------------------- /apps/codexgraph_agent/prompt/code_generator/python/system_prompt_primary.txt: -------------------------------------------------------------------------------- 1 | # ROLE # 2 | You are an assistant for adding new code based on user requirements. Your task is to implement a user's requirement in an existing codebase and generate the corresponding code. Before doing this, you need to determine what information needs to be retrieved and write text queries to retrieve relevant information from the given code graph database. 3 | 4 | # LIMITATIONS # 5 | 1. You can only process text content, including code; 6 | 2. You cannot interpret graphical or visual content; 7 | 3. You have no access to the original project code, only to the information stored in the code graph database; 8 | 9 | # CODE GRAPH DATABASE # 10 | The code graph database is derived from static parsing of the project. Another code assistant, proficient in Cypher and graph databases, will translate your text queries into Cypher queries to extract the needed information based on your problem statement. The database is assumed to be devoid of issues. If unexpected responses occur during querying, it might be due to a faulty query, or missing nodes or edges resulting from indirect calls, dynamic behaviors, and complex control flows. 11 | 12 | # SCHEMA OF THE CODE GRAPH DATABASE # 13 | {{python_db_schema}} 14 | 15 | # REQUIREMENTS # 16 | 1. Since the Cypher code assistant can only translate your text queries into corresponding Cypher queries and does not have information about the given code files, please ensure that your text queries are very clear and include explicit input parameters. 17 | 2. Since your task involves cross-file code, it is crucial to pay attention to the module references in the import section. When writing queries, make sure to specify the module's full name. For example, for the statement from x.y import z, emphasize that the full module name should be 'x.y.z' rather than just 'z'. 18 | 19 | # Notes for code generation: 20 | 1. Any non-existent/not-found method/field/function is not allowed. 21 | 2. No assumptions are allowed. 22 | -------------------------------------------------------------------------------- /apps/codexgraph_agent/prompt/code_unittester/python/start_prompt_cypher.txt: -------------------------------------------------------------------------------- 1 | ### Text Queries: 2 | ${text_queries} 3 | 4 | ### Task Instructions: 5 | Your task is to decompose the given text queries into several simple ones and try to use precise or fuzzy matching, and then translate the decomposed text queries into the corresponding Cypher queries. 
Your answer should follow the formats below: 6 | 7 | [start_of_cypher_queries] 8 | ### Query 1 9 | **decomposed text query**: 10 | ```cypher 11 | 12 | ``` 13 | 14 | ### Query 2 15 | **decomposed text query**: 16 | ```cypher 17 | 18 | ``` 19 | ... 20 | 21 | ### Query n 22 | **decomposed text query**: 23 | ```cypher 24 | 25 | ``` 26 | [end_of_cypher_queries] 27 | 28 | Here are some useful tips: 29 | 1. Try adding exception handling logic (e.g., `OPTIONAL`, `OR`) to return error information or handle edge cases in Cypher queries. 30 | 2. Use appropriate Cypher patterns and aggregation functions to handle exceptions and edge cases that may occur in queries. 31 | 3. Try to use the nodes (CLASS, METHOD, FUNCTION, FIELD, GLOBAL_VARIABLE, MODULE) and edges (CONTAINS, HAS_METHOD, HAS_FIELD, USES, INHERITS) in the schema to rewrite text queries instead of using ambiguous expressions like `attributes` or `objects`. 32 | 4. Use fuzzy matching when retrieving nodes to avoid issues with absolute paths. For example, use the following Cypher query: `WHERE m.name =~ '.*'` 33 | 34 | Example: 35 | When you try to find MODULE a and FUNCTION b, first try to use precise matching: 36 | ``` 37 | MATCH (m:MODULE) 38 | WHERE m.name = 'a' 39 | MATCH (m)-[:CONTAINS]->(f:FUNCTION) 40 | WHERE f.name = 'b' 41 | RETURN m, f 42 | ``` 43 | and then try fuzzy matching: 44 | ``` 45 | MATCH (m:MODULE) 46 | WHERE m.name =~ '.*a' 47 | MATCH (m)-[:CONTAINS]->(f:FUNCTION) 48 | WHERE f.name =~ '.*b' 49 | RETURN m, f 50 | ``` 51 | -------------------------------------------------------------------------------- /apps/codexgraph_agent/prompt/code_unittester/python/system_prompt_cypher.txt: -------------------------------------------------------------------------------- 1 | # ROLE # 2 | You are a Cypher code assistant proficient in querying graph databases. Your task is to write Cypher queries based on the queries provided by the code assistant specializing in cross-file code tasks. Your goal is to extract the relevant information from the code graph database to support writing professional unit tests for the code. 3 | 4 | # Information Needed for Writing Unit Tests 5 | 1. Understand the Class Functionality: Retrieve all methods' `name` or `code` attributes of the CLASS. Determine the key functionalities and methods of the class that require testing. 6 | 2. Understand Class Dependencies: Retrieve all `INHERITS` edges related to the CLASS. Identify and understand the dependencies of the class, including parent classes and associated classes or modules. 7 | 8 | # LIMITATIONS # 9 | 1. You cannot modify or add to the schema of the code graph database. 10 | 2. You must rely on the problem statements and constraints given by the cross-file code completion assistant. 11 | 12 | # CODE GRAPH DATABASE # 13 | The code graph database is derived from static parsing of the project. You will write Cypher queries to extract necessary information based on the text queries provided. The database is presumed error-free. If unexpected responses arise, it might be due to incorrect queries, or missing nodes or edges resulting from indirect calls, dynamic behaviors, and complex control flows. 14 | 15 | # SCHEMA OF THE CODE GRAPH DATABASE # 16 | {{python_db_schema}} 17 | -------------------------------------------------------------------------------- /apps/codexgraph_agent/prompt/code_unittester/python/system_prompt_primary.txt: -------------------------------------------------------------------------------- 1 | # ROLE # 2 | You are a professional unittest engineer. 
Your task is to analyze a specific class/function's functionality and write professional unit tests for it. You may write text queries to retrieve information from the given code graph database. 3 | 4 | # LIMITATIONS # 5 | 1. You can only process text content, including code; 6 | 2. You cannot interpret graphical or visual content; 7 | 3. You have no access to the original project code, only to the information stored in the code graph database; 8 | 9 | # CODE GRAPH DATABASE # 10 | The code graph database is derived from static parsing of the project. Another code assistant, proficient in Cypher and graph databases, will translate your text queries into Cypher queries to extract the needed information based on your problem statement. The database is assumed to be devoid of issues. If unexpected responses occur during querying, it might be due to a faulty query, or missing nodes or edges resulting from indirect calls, dynamic behaviors, and complex control flows. 11 | 12 | # SCHEMA OF THE CODE GRAPH DATABASE # 13 | {{python_db_schema}} 14 | 15 | # REQUIREMENTS # 16 | 1. Since the Cypher code assistant can only translate your text queries into corresponding Cypher queries and does not have information about the given code files, please ensure that your text queries are very clear and include explicit input parameters. 17 | 2. Since your task involves cross-file code, it is crucial to pay attention to the module references in the import section. When writing queries, make sure to specify the module's full name. For example, for the statement from x.y import z, emphasize that the full module name should be 'x.y.z' rather than just 'z'. 18 | -------------------------------------------------------------------------------- /apps/codexgraph_agent/requirements.txt: -------------------------------------------------------------------------------- 1 | fasteners 2 | py2neo 3 | streamlit 4 | -------------------------------------------------------------------------------- /apps/codexgraph_agent/run.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | import sys 4 | 5 | 6 | def main(): 7 | # Determine the project root directory 8 | project_root = os.path.abspath( 9 | os.path.join(os.path.dirname(__file__), '..', '..')) 10 | # Add project root to PYTHONPATH if not already in it 11 | if project_root not in sys.path: 12 | sys.path.append(project_root) 13 | 14 | # Export PYTHONPATH for subprocess 15 | os.environ['PYTHONPATH'] = project_root 16 | 17 | # Path to your Streamlit app 18 | streamlit_app_path = os.path.join(project_root, 'apps', 'codexgraph_agent', 19 | 'help.py') 20 | 21 | # Print PYTHONPATH for debugging purposes 22 | print(f"PYTHONPATH is set to: {os.environ['PYTHONPATH']}") 23 | 24 | # Run the Streamlit app 25 | subprocess.run(['streamlit', 'run', streamlit_app_path]) 26 | 27 | 28 | if __name__ == '__main__': 29 | main() 30 | -------------------------------------------------------------------------------- /apps/codexgraph_agent/setting.json: -------------------------------------------------------------------------------- 1 | { 2 | "setting": { 3 | "prompt_path": "apps/codexgraph_agent/prompt", 4 | "repo_path": "", 5 | "llm_model_name": "deepseek-coder", 6 | "llm_temperature": 1.0, 7 | "max_iterations": 5, 8 | "project_list": [ 9 | "code_chat", 10 | "code_comment", 11 | "code_debug", 12 | "code_generate", 13 | "code_unittest" 14 | ], 15 | "project_id": "code_chat", 16 | "env_path_dict": { 17 | "env_path": "", 18 | 
"working_directory": "" 19 | }, 20 | "neo4j": { 21 | "url": "", 22 | "user": "", 23 | "password": "", 24 | "database_name": "" 25 | } 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /apps/datascience_assistant/README.md: -------------------------------------------------------------------------------- 1 | # Data Science Assistant with Streamlit ⭐ 2 | Data Science Assistant (hereinafter referred to as DS Assistant) is a Data Science Assistant developed based on the modelscope-agent framework, which can automatically perform exploratory Data analysis (EDA) in Data Science tasks according to user needs, Data preprocessing, feature engineering, model training, model evaluation and other steps are fully automated. 3 | 4 | Detailed information can be found in the [documentation](../../docs/source/agents/data_science_assistant.md). 5 | 6 | ## Quick Start 7 | Streamlit is a Python library that makes it easy to create and share beautiful, custom web apps for machine learning and data science. 8 | 9 | To run the DS Assistant in streamlit, you need to install additional libraries. You can install it using pip: 10 | ```bash 11 | pip install streamlit mistune matplotlib nbconvert 12 | ``` 13 | 14 | Then, you can run the DS Assistant using the following command: 15 | ```bash 16 | cd ../../ 17 | streamlit run ./apps/datascience_assistant/app.py 18 | ``` 19 | 20 | After running the command, a new tab will open in your default web browser with the DS Assistant running. 21 | 22 | you can upload your dataset and write your request. 23 | ![img_2.png](../../resources/data_science_assistant_streamlit_1.png) 24 | 25 | After submitting your request, DS Assistant will automatically generate a plan for this request. 26 | ![img_2.png](../../resources/data_science_assistant_streamlit_4.png) 27 | 28 | After that, DS Assistant will automatically excute every task, you can view all of the codes and details in streamlit 29 | ![img_3.png](../../resources/data_science_assistant_streamlit_2.png) 30 | 31 | After you have finished using the DS Assistant, you can directly convert the running process to a pdf 32 | ![img_5.png](../../resources/data_science_assistant_streamlit_3.png) 33 | -------------------------------------------------------------------------------- /apps/datascience_assistant/app.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | 4 | import streamlit as st 5 | 6 | os.environ['DASHSCOPE_API_KEY'] = 'YOUR_API_KEY' 7 | 8 | 9 | def setup_project_paths(): 10 | current_dir = os.path.dirname(os.path.abspath(__file__)) # noqa 11 | project_root_path = os.path.abspath(os.path.join(current_dir, 12 | '../../')) # noqa 13 | sys.path.append(project_root_path) # noqa 14 | 15 | 16 | if __name__ == '__main__': 17 | setup_project_paths() 18 | from modelscope_agent.agents.data_science_assistant import \ 19 | DataScienceAssistant # noqa 20 | from modelscope_agent.tools.metagpt_tools.tool_recommend import \ 21 | TypeMatchToolRecommender # noqa 22 | st.title('Data Science Assistant') 23 | st.write( 24 | 'This is a data science assistant that can help you with your data science tasks.' 25 | ) 26 | st.write( 27 | 'Please input your request and upload files then click the submit button.' 28 | ) 29 | 30 | files = st.file_uploader( 31 | 'Please upload files that you need. 
', accept_multiple_files=True) 32 | last_file_name = '' 33 | user_request = st.text_area('User Request') 34 | if st.button('submit'): 35 | llm_config = { 36 | 'model': 'qwen2-72b-instruct', 37 | 'model_server': 'dashscope', 38 | } 39 | data_science_assistant = DataScienceAssistant( 40 | llm=llm_config, 41 | tool_recommender=TypeMatchToolRecommender(tools=[''])) 42 | for file in files: 43 | with open(file.name, 'wb') as f: 44 | f.write(file.getbuffer()) 45 | data_science_assistant.run(user_request=user_request, streamlit=True) 46 | -------------------------------------------------------------------------------- /apps/mcp-playground/assets/deepseek.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/apps/mcp-playground/assets/deepseek.png -------------------------------------------------------------------------------- /apps/mcp-playground/assets/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/apps/mcp-playground/assets/logo.png -------------------------------------------------------------------------------- /apps/mcp-playground/assets/mcp.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/apps/mcp-playground/assets/mcp.png -------------------------------------------------------------------------------- /apps/mcp-playground/assets/meta.webp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/apps/mcp-playground/assets/meta.webp -------------------------------------------------------------------------------- /apps/mcp-playground/assets/modelscope-mcp.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/apps/mcp-playground/assets/modelscope-mcp.png -------------------------------------------------------------------------------- /apps/mcp-playground/assets/qwen.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/apps/mcp-playground/assets/qwen.png -------------------------------------------------------------------------------- /apps/mcp-playground/env.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import json 4 | 5 | is_cn_env = os.getenv('MODELSCOPE_ENVIRONMENT') == 'studio' 6 | 7 | api_key = os.getenv('MODELSCOPE_API_KEY') 8 | 9 | internal_mcp_config = json.loads( 10 | os.getenv('INTERNAL_MCP_CONFIG', '{"mcpServers": {}}')) 11 | 12 | # oss 13 | endpoint = os.getenv('OSS_ENDPOINT') 14 | 15 | region = os.getenv('OSS_REGION') 16 | 17 | bucket_name = os.getenv('OSS_BUCKET_NAME') 18 | -------------------------------------------------------------------------------- /apps/mcp-playground/requirements.txt: -------------------------------------------------------------------------------- 1 | exceptiongroup 2 | gradio 3 | https://modelscope-agent.oss-cn-hangzhou.aliyuncs.com/tmp/modelscope_agent-0.8.0-py3-none-any.whl 4 | langchain 5 | 
langchain-openai 6 | langchain_core 7 | langchain_mcp_adapters 8 | langgraph 9 | mcp 10 | modelscope_studio==1.2.3 11 | oss2 12 | -------------------------------------------------------------------------------- /apps/mcp-playground/tools/oss.py: -------------------------------------------------------------------------------- 1 | import uuid 2 | from http import HTTPStatus 3 | 4 | import oss2 5 | from env import bucket_name, endpoint, region 6 | from oss2.credentials import EnvironmentVariableCredentialsProvider 7 | 8 | # Credentials are read from the OSS_ACCESS_KEY_ID and OSS_ACCESS_KEY_SECRET environment variables. 9 | auth = oss2.ProviderAuthV4(EnvironmentVariableCredentialsProvider()) 10 | 11 | bucket = oss2.Bucket(auth, endpoint, bucket_name, region=region) 12 | 13 | 14 | def file_path_to_oss_url(file_path: str): 15 | if file_path.startswith('http'): 16 | return file_path 17 | ext = file_path.split('.')[-1] 18 | object_name = f'studio-temp/mcp-playground/{uuid.uuid4()}.{ext}' 19 | response = bucket.put_object_from_file(object_name, file_path) 20 | file_url = file_path 21 | if response.status == HTTPStatus.OK: 22 | file_url = bucket.sign_url( 23 | 'GET', object_name, 60 * 60, slash_safe=True) 24 | return file_url 25 | -------------------------------------------------------------------------------- /apps/mobile_agent/README.md: -------------------------------------------------------------------------------- 1 | # Mobile-Agent-v2: Mobile Device Operation Assistant with Effective Navigation via Multi-Agent Collaboration 2 | 3 | ## 🔧Getting Started 4 | 5 | ### Installation 6 | ``` 7 | pip install -r requirements.txt 8 | ``` 9 | 10 | ### Preparation for Connecting Mobile Device 11 | 1. Download the [Android Debug Bridge](https://developer.android.com/tools/releases/platform-tools?hl=en). 12 | 2. Turn on the ADB debugging switch on your Android phone; it needs to be enabled in the developer options first. 13 | 3. Connect your phone to the computer with a data cable and select "Transfer files". 14 | 4. Test your ADB environment as follows: ```/path/to/adb devices```. If the connected devices are displayed, the preparation is complete. 15 | 5. If you are using a macOS or Linux system, make sure to turn on adb permissions as follows: ```sudo chmod +x /path/to/adb``` 16 | 6. If you are using a Windows system, the path will be ```xx/xx/adb.exe``` 17 | 18 | 19 | 20 | ### Run 21 | 22 | The related arguments to run the demo include: 23 | * `--adb_path`: The path to your adb executable. 24 | * `--openai_api_key`: The OpenAI API key used to call the LLM. 25 | * `--dashscope_api_key`: The DashScope API key used to call qwen-vl. 26 | * `--instruction`: Your instruction. 
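A typical invocation (shown here with placeholder values; per run.py, the two API keys default to the `OPENAI_API_KEY` and `DASHSCOPE_API_KEY` environment variables) would look like `python run.py --adb_path /path/to/adb --instruction "Tell me today's weather"`. 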
27 | -------------------------------------------------------------------------------- /apps/mobile_agent/requirements.txt: -------------------------------------------------------------------------------- 1 | git+https://github.com/openai/CLIP.git 2 | keras==2.9.0 3 | matplotlib 4 | modelscope 5 | opencv-python 6 | pyclipper 7 | pycocotools 8 | SentencePiece 9 | shapely 10 | supervision 11 | TensorFlow==2.9.1 12 | tf_keras 13 | tf_slim 14 | timm 15 | torch 16 | torchvision 17 | transformers 18 | -------------------------------------------------------------------------------- /apps/mobile_agent/run.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | 4 | from modelscope_agent.agents.mobile_agent_v2 import MobileAgentV2 5 | from modelscope_agent.environment.android_adb import ADBEnvironment 6 | 7 | parser = argparse.ArgumentParser() 8 | parser.add_argument('--adb_path', type=str, default='./adb/adb') 9 | parser.add_argument( 10 | '--openai_api_key', type=str, default=os.getenv('OPENAI_API_KEY')) 11 | parser.add_argument( 12 | '--dashscope_api_key', type=str, default=os.getenv('DASHSCOPE_API_KEY')) 13 | parser.add_argument( 14 | '--instruction', type=str, default="Tell me today's weather") 15 | 16 | args = parser.parse_args() 17 | 18 | adb_path = args.adb_path 19 | 20 | os.environ['OPENAI_API_KEY'] = args.openai_api_key 21 | # used to call qwen-vl for icon descriptions during perception 22 | os.environ['DASHSCOPE_API_KEY'] = args.dashscope_api_key 23 | 24 | instruction = args.instruction 25 | 26 | llm_config = { 27 | 'model': 'gpt-4o', 28 | 'model_server': 'openai', 29 | } 30 | 31 | env = ADBEnvironment(adb_path) 32 | 33 | agent = MobileAgentV2( 34 | env=env, 35 | llm_decision=llm_config, 36 | llm_planner=llm_config, 37 | llm_reflect=llm_config) 38 | 39 | agent.run(instruction) 40 | -------------------------------------------------------------------------------- /apps/msgpt/run_msgpt.sh: -------------------------------------------------------------------------------- 1 | export PYTHONPATH=$PYTHONPATH:../../ 2 | python app.py 3 | -------------------------------------------------------------------------------- /apps/multi_roles_chat_room/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/apps/multi_roles_chat_room/__init__.py -------------------------------------------------------------------------------- /apps/multi_roles_chat_room/assets/app.css: -------------------------------------------------------------------------------- 1 | .story-cardlist .card { 2 | cursor: pointer; 3 | } 4 | .story-cardlist .card:hover { 5 | box-shadow: 0 4px 8px 0 rgba(0,0,0,0.2); 6 | transition: box-shadow 0.3s ease-in-out; 7 | } 8 | -------------------------------------------------------------------------------- /apps/multi_roles_chat_room/assets/app.js: -------------------------------------------------------------------------------- 1 | function init() { 2 | window.js_choose_story = function(story_id) { 3 | var btn = document.getElementById('entry_fake_btn'); 4 | if (btn) { 5 | btn.setAttribute('data-story', story_id); 6 | btn.click(); 7 | } 8 | } 9 | 10 | window.get_story_id = function(){ 11 | return [document.getElementById('entry_fake_btn').getAttribute('data-story')] 12 | } 13 | } 14 | --------------------------------------------------------------------------------
/apps/multi_roles_chat_room/requirements.txt: -------------------------------------------------------------------------------- 1 | gradio==4.8 2 | modelscope-agent==0.4.1 3 | modelscope_studio==0.5.2 4 | -------------------------------------------------------------------------------- /apps/multi_roles_chat_room/resources/default_girl.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/apps/multi_roles_chat_room/resources/default_girl.png -------------------------------------------------------------------------------- /apps/multi_roles_chat_room/resources/fanxian.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/apps/multi_roles_chat_room/resources/fanxian.jpg -------------------------------------------------------------------------------- /apps/multi_roles_chat_room/resources/guyi.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/apps/multi_roles_chat_room/resources/guyi.png -------------------------------------------------------------------------------- /apps/multi_roles_chat_room/resources/haitangduoduo.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/apps/multi_roles_chat_room/resources/haitangduoduo.jpg -------------------------------------------------------------------------------- /apps/multi_roles_chat_room/resources/linwaner.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/apps/multi_roles_chat_room/resources/linwaner.jpeg -------------------------------------------------------------------------------- /apps/multi_roles_chat_room/resources/liyunsi.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/apps/multi_roles_chat_room/resources/liyunsi.png -------------------------------------------------------------------------------- /apps/multi_roles_chat_room/resources/silili.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/apps/multi_roles_chat_room/resources/silili.jpeg -------------------------------------------------------------------------------- /apps/multi_roles_chat_room/resources/zhandoudou.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/apps/multi_roles_chat_room/resources/zhandoudou.jpg -------------------------------------------------------------------------------- /apps/multi_roles_chat_room/resources/zhengziyan.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/apps/multi_roles_chat_room/resources/zhengziyan.png 
-------------------------------------------------------------------------------- /config/.env.template: -------------------------------------------------------------------------------- 1 | ## llm model file, including url, token 2 | LLM_CONFIG_FILE=../config/cfg_model_template.json 3 | 4 | ## tool config file, including url, token 5 | TOOL_CONFIG_FILE=../config/cfg_tool_template.json 6 | 7 | ## save directory of outputs 8 | OUTPUT_FILE_DIRECTORY=./tmp 9 | 10 | ## user token 11 | DASHSCOPE_API_KEY=your_dashscope_api_key 12 | MODELSCOPE_API_TOKEN=your_modelscope_api_token 13 | -------------------------------------------------------------------------------- /docker/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG BASE_IMAGE=registry.cn-beijing.aliyuncs.com/modelscope-repo/modelscope:ubuntu22.04-cuda12.1.0-py310-torch2.1.2-tf2.14.0-1.12.0 2 | FROM $BASE_IMAGE 3 | 4 | # Set the working directory to /home/workspace 5 | WORKDIR /home/workspace 6 | 7 | # Copy all files from the current directory into /home/workspace in the container 8 | COPY . /home/workspace/ 9 | 10 | # Install the dependencies from the repository root 11 | RUN pip install -r requirements.txt 12 | 13 | # Install the dependencies for apps/agentfabric 14 | RUN pip install -r apps/agentfabric/requirements.txt 15 | 16 | # Add /home/workspace to the PYTHONPATH environment variable 17 | ENV PYTHONPATH="${PYTHONPATH}:/home/workspace" 18 | 19 | # Set the command to run when the container starts 20 | CMD ["python", "apps/agentfabric/app.py"] 21 | -------------------------------------------------------------------------------- /docker/build_docker.sh: -------------------------------------------------------------------------------- 1 | docker build -t modelscope-agent:v1.0 -f docker/Dockerfile . 2 | -------------------------------------------------------------------------------- /docker/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | tool_manager_service: 5 | build: 6 | context: ../ 7 | dockerfile: docker/tool_manager.dockerfile 8 | ports: 9 | - "31511:31511" 10 | entrypoint: uvicorn modelscope_agent_servers.tool_manager_server.api:app --host 0.0.0.0 --port 31511 11 | -------------------------------------------------------------------------------- /docker/dockerfile.agentfabric: -------------------------------------------------------------------------------- 1 | FROM registry.cn-beijing.aliyuncs.com/modelscope-repo/modelscope:ubuntu20.04-py38-torch2.0.1-tf2.13.0-1.9.5 2 | 3 | RUN apt update && apt-get install -y curl wget inetutils-ping vim less 4 | RUN pip install gunicorn gradio dashscope "datasets>=2.8.0" ipython "langchain<=0.0.292" "modelscope>=1.7.0" moviepy ms-swift openai opencv-python openpyxl Pillow "pydantic>=2.0.0" pypdf pytest python-dotenv soundfile "transformers>=4.29.0" transformers_stream_generator docx2txt python-pptx 5 | 6 | COPY requirements.txt /home/workspace/requirements.txt 7 | RUN pip install -r /home/workspace/requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ 8 | 9 | COPY . 
/home/workspace 10 | 11 | WORKDIR /home/workspace/apps/agentfabric 12 | -------------------------------------------------------------------------------- /docker/tool_manager.dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:22.04 2 | 3 | WORKDIR /app 4 | 5 | # install basic packages 6 | RUN apt-get update && apt-get install -y \ 7 | curl \ 8 | wget \ 9 | git \ 10 | vim \ 11 | nano \ 12 | unzip \ 13 | zip \ 14 | python3 \ 15 | python3-pip \ 16 | python3-venv \ 17 | python3-dev \ 18 | build-essential \ 19 | && rm -rf /var/lib/apt/lists/* 20 | 21 | # file ready 22 | RUN rm -rf /tmp/* /var/tmp/* 23 | RUN mkdir -p assets 24 | RUN mkdir -p workspace 25 | 26 | # install dependency 27 | ENV PYTHONPATH $PYTHONPATH:/app/modelscope_agent_servers 28 | RUN pip install fastapi pydantic uvicorn docker sqlmodel 29 | 30 | COPY modelscope_agent_servers /app/modelscope_agent_servers 31 | 32 | #ENTRYPOINT exec uvicorn modelscope_agent_servers.tool_manager_server.api:app --host 0.0.0.0 --port 31511 33 | -------------------------------------------------------------------------------- /docker/tool_node.dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:22.04 2 | 3 | WORKDIR /app 4 | 5 | # install basic packages 6 | RUN apt-get update && apt-get install -y \ 7 | curl \ 8 | wget \ 9 | git \ 10 | vim \ 11 | nano \ 12 | unzip \ 13 | zip \ 14 | python3 \ 15 | python3-pip \ 16 | python3-venv \ 17 | python3-dev \ 18 | build-essential \ 19 | && rm -rf /var/lib/apt/lists/* 20 | 21 | 22 | # file ready 23 | RUN rm -rf /tmp/* /var/tmp/* 24 | RUN mkdir -p assets 25 | RUN mkdir -p workspace 26 | 27 | ## install modelscope_agent 28 | #RUN pip install torch 29 | #RUN apt-get update && apt-get install ffmpeg libsm6 libxext6 -y 30 | 31 | COPY requirements.txt . 32 | RUN pip install --no-cache-dir -r requirements.txt 33 | RUN pip install fastapi uvicorn 34 | 35 | # install ffmpeg 36 | RUN wget -O ffmpeg.tar.xz https://modelscope-agent.oss-cn-hangzhou.aliyuncs.com/resources/ffmpeg.tar.xz && \ 37 | tar xvf ffmpeg.tar.xz 38 | 39 | 40 | ENV PYTHONPATH $PYTHONPATH:/app/modelscope_agent:/app/modelscope_agent_servers 41 | ENV BASE_TOOL_DIR /app/assets 42 | ENV PATH=/app/ffmpeg-git-20240629-amd64-static:$PATH 43 | 44 | # install tool_node 45 | COPY modelscope_agent_servers /app/modelscope_agent_servers 46 | COPY modelscope_agent /app/modelscope_agent 47 | 48 | # start up script file 49 | COPY scripts/run_tool_node.sh /app/run_tool_node.sh 50 | RUN chmod +x /app/run_tool_node.sh 51 | #ENTRYPOINT exec uvicorn tool_service.tool_node.api:app --host 0.0.0.0 --port $PORT 52 | 53 | 54 | #ENTRYPOINT [ "uvicorn", "tool_service.main:app", "--host", "0.0.0.0","--port","31513" ] 55 | # 56 | # docker build -f tool_service/docker/tool_node.dockerfile -t modelscope-agent/tool-node:v0.1 . 57 | # docker push modelscope-agent/tool-node:v0.1 58 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = source_en 9 | BUILDDIR = build 10 | 11 | # Put it first so that "make" without argument is like "make help". 
12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | ## maintain docs 2 | 1. build docs 3 | ```shell 4 | # in root directory: 5 | make docs 6 | ``` 7 | 8 | 2. doc string format 9 | 10 | We adopt the Google style docstring format as the standard; please refer to the following documents. 11 | 1. Google Python style guide docstring [link](http://google.github.io/styleguide/pyguide.html#381-docstrings) 12 | 2. Google docstring example [link](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html) 13 | 3. sample: torch.nn.modules.conv [link](https://pytorch.org/docs/stable/_modules/torch/nn/modules/conv.html#Conv1d) 14 | 4. the `load` function as an example: 15 | 16 | ```python 17 | def load(file, file_format=None, **kwargs): 18 | """Load data from json/yaml/pickle files. 19 | 20 | This method provides a unified api for loading data from serialized files. 21 | 22 | Args: 23 | file (str or :obj:`Path` or file-like object): Filename or a file-like 24 | object. 25 | file_format (str, optional): If not specified, the file format will be 26 | inferred from the file extension, otherwise use the specified one. 27 | Currently supported formats include "json", "yaml/yml". 28 | 29 | Examples: 30 | >>> load('/path/of/your/file') # file is stored in disk 31 | >>> load('https://path/of/your/file') # file is stored on internet 32 | >>> load('oss://path/of/your/file') # file is stored in petrel 33 | 34 | Returns: 35 | The content from the file. 36 | """ 37 | ``` 38 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=source 11 | set BUILDDIR=build 12 | 13 | if "%1" == "" goto help 14 | 15 | %SPHINXBUILD% >NUL 2>NUL 16 | if errorlevel 9009 ( 17 | echo. 18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 19 | echo.installed, then set the SPHINXBUILD environment variable to point 20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 21 | echo.may add the Sphinx directory to PATH. 22 | echo. 
23 | echo.If you don't have Sphinx installed, grab it from 24 | echo.http://sphinx-doc.org/ 25 | exit /b 1 26 | ) 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /docs/resource/agentfabric_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/docs/resource/agentfabric_0.png -------------------------------------------------------------------------------- /docs/resource/agentfabric_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/docs/resource/agentfabric_1.png -------------------------------------------------------------------------------- /docs/resource/agentfabric_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/docs/resource/agentfabric_2.png -------------------------------------------------------------------------------- /docs/resource/agentfabric_3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/docs/resource/agentfabric_3.png -------------------------------------------------------------------------------- /docs/resource/agentfabric_4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/docs/resource/agentfabric_4.png -------------------------------------------------------------------------------- /docs/resource/local_deploy.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/docs/resource/local_deploy.png -------------------------------------------------------------------------------- /docs/resource/local_deploy_agent.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/docs/resource/local_deploy_agent.png -------------------------------------------------------------------------------- /docs/resource/terminal-file.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/docs/resource/terminal-file.png -------------------------------------------------------------------------------- /docs/resource/tool-readme.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/docs/resource/tool-readme.png -------------------------------------------------------------------------------- /docs/source/.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | # .readthedocs.yaml 2 | # Read the Docs configuration file 3 | 
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 4 | 5 | # Required 6 | version: 2 7 | 8 | # Set the OS, Python version and other tools you might need 9 | build: 10 | os: ubuntu-22.04 11 | tools: 12 | python: "3.12" 13 | 14 | # Build documentation in the "docs/" directory with Sphinx 15 | sphinx: 16 | configuration: docs/source/conf.py 17 | 18 | # Optionally build your docs in additional formats such as PDF and ePub 19 | # formats: 20 | # - pdf 21 | # - epub 22 | 23 | # Optional but recommended, declare the Python requirements required 24 | # to build your documentation 25 | # See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html 26 | python: 27 | install: 28 | - requirements: requirements/docs.txt 29 | - requirements: requirements.txt 30 | -------------------------------------------------------------------------------- /docs/source/get_started/installation.md: -------------------------------------------------------------------------------- 1 | # 安装 2 | 3 | ## 使用conda 4 | 可以使用pip和conda设置本地ModelScope-agent环境。我们建议使用anaconda或miniconda来创建本地python环境: 5 | 6 | ```shell 7 | conda create -n ms_agent python=3.10 8 | conda activate ms_agent 9 | ``` 10 | 克隆仓库并安装依赖 11 | ```shell 12 | git clone https://github.com/modelscope/modelscope-agent.git 13 | cd modelscope-agent 14 | pip install -r requirements.txt 15 | 16 | # 将当前工作目录设置为PYTHONPATH环境变量 17 | export PYTHONPATH=$PYTHONPATH:`pwd` 18 | ``` 19 | -------------------------------------------------------------------------------- /docs/source/get_started/quickstart.md: -------------------------------------------------------------------------------- 1 | # 快速开始 2 | 3 | agent结合了大型语言模型(LLM)以及特定任务的工具,并利用LLM来确定为了完成用户任务需要调用哪个或哪些工具。 4 | 5 | 在一开始,您所需要做的就是使用相应的任务初始化一个`RolePlay`对象。 6 | 7 | - 样本代码使用了 qwen-max 模型、绘图工具和天气预报工具。 8 | - 使用 qwen-max 模型需要将示例中的 YOUR_DASHSCOPE_API_KEY 替换为您的 API-KEY,以便代码正常运行。您的 YOUR_DASHSCOPE_API_KEY 可以在[这里](https://help.aliyun.com/zh/dashscope/developer-reference/activate-dashscope-and-create-an-api-key)获得。绘图工具也调用了 DASHSCOPE API(wanx),因此不需要额外配置。 9 | - 在使用天气预报工具时,需要将示例中的 YOUR_AMAP_TOKEN 替换为您的高德天气 API-KEY,以便代码能够正常运行。您的 YOUR_AMAP_TOKEN 可以在[这里](https://lbs.amap.com/api/javascript-api-v2/guide/services/weather)获得。 10 | 11 | ```Python 12 | # 配置环境变量;如果您已经提前将api-key提前配置到您的运行环境中,可以省略这个步骤 13 | import os 14 | os.environ['DASHSCOPE_API_KEY']=YOUR_DASHSCOPE_API_KEY 15 | os.environ['AMAP_TOKEN']=YOUR_AMAP_TOKEN 16 | 17 | # 选用RolePlay 配置agent 18 | from modelscope_agent.agents.role_play import RolePlay # NOQA 19 | 20 | role_template = '你扮演一个天气预报助手,你需要查询相应地区的天气,并调用给你的画图工具绘制一张城市的图。' 21 | 22 | llm_config = {'model': 'qwen-max', 'model_server': 'dashscope'} 23 | 24 | # input tool name 25 | function_list = ['amap_weather', 'image_gen'] 26 | 27 | bot = RolePlay( 28 | function_list=function_list, llm=llm_config, instruction=role_template) 29 | 30 | response = bot.run('朝阳区天气怎样?') 31 | 32 | text = '' 33 | for chunk in response: 34 | text += chunk 35 | ``` 36 | 37 | 结果 38 | - Terminal 运行 39 | 40 | ```shell 41 | # 第一次调用llm的输出 42 | Action: amap_weather 43 | Action Input: {"location": "朝阳区"} 44 | 45 | # 第二次调用llm的输出 46 | 目前,朝阳区的天气状况为阴天,气温为1度。 47 | 48 | Action: image_gen 49 | Action Input: {"text": "朝阳区城市风光", "resolution": "1024*1024"} 50 | 51 | # 第三次调用llm的输出 52 | 目前,朝阳区的天气状况为阴天,气温为1度。同时,我已为你生成了一张朝阳区的城市风光图,如下所示: 53 | 54 | 
![](https://dashscope-result-sh.oss-cn-shanghai.aliyuncs.com/1d/45/20240204/3ab595ad/96d55ca6-6550-4514-9013-afe0f917c7ac-1.jpg?Expires=1707123521&OSSAccessKeyId=LTAI5tQZd8AEcZX6KZV4G8qL&Signature=RsJRt7zsv2y4kg7D9QtQHuVkXZY%3D) 55 | ``` 56 | -------------------------------------------------------------------------------- /docs/source/index.rst: -------------------------------------------------------------------------------- 1 | .. modelscope-agent documentation file, 2 | You can adapt this file completely to your liking, but it should at least 3 | contain the root `toctree` directive. 4 | 5 | Modelscope-Agent DOCUMENTATION 6 | ============================== 7 | 8 | .. toctree:: 9 | :maxdepth: 2 10 | :caption: Get Started 11 | 12 | get_started/installation.md 13 | get_started/introduction.md 14 | get_started/quickstart.md 15 | 16 | 17 | .. toctree:: 18 | :maxdepth: 2 19 | :caption: Module Overview 20 | 21 | modules/llm.md 22 | modules/tool.md 23 | modules/memory.md 24 | modules/retrieve.md 25 | modules/callback.md 26 | 27 | .. toctree:: 28 | :maxdepth: 2 29 | :caption: Tools Contribution 30 | 31 | contributing/tool_contribution_guide.md 32 | 33 | 34 | .. toctree:: 35 | :maxdepth: 2 36 | :caption: LLM Application 37 | 38 | training_llms/train.md 39 | training_llms/train_agentfabric_llm_tool_use.md 40 | llms/qwen2_tool_calling.md 41 | llms/llama3.1_tool_calling.md 42 | 43 | 44 | .. toctree:: 45 | :maxdepth: 2 46 | :caption: Use cases 47 | 48 | use_cases/application.md 49 | use_cases/code_interpreter_case.md 50 | use_cases/llama3_for_agent.md 51 | use_cases/openAPI_for_agent.md 52 | deployment/local_deploy.md 53 | 54 | 55 | .. toctree:: 56 | :maxdepth: 2 57 | :caption: Agents 58 | 59 | agents/data_science_assistant.md 60 | 61 | 62 | 63 | 64 | Indices and tables 65 | ================== 66 | * :ref:`genindex` 67 | * :ref:`modindex` 68 | * :ref:`search` 69 | -------------------------------------------------------------------------------- /docs/source/modules/callback.md: -------------------------------------------------------------------------------- 1 | # Callback模块使用说明 2 | 3 | 我们设计了一套回调机制,允许用户可以自定义的在`Agent`执行的关键节点中插入想要执行的代码,便于实现诸如日志记录等功能。 4 | 5 | 目前,我们提供的可插入回调函数的节点位置包括`Agent`中常见的`LLM,tool,rag `等模块的执行以及`Agent`本身每次/轮执行的开始和结束。 6 | 7 | 用户可以在Agent实例化的时候传入想要使用的回调函数。这些回调函数会通过`CallbackManager`类统一管理,并在相应的节点执行。一个简单的自定义回调函数的定义和使用样例如下所示: 8 | 9 | ```Python 10 | class SimpleCallback(BaseCallback): 11 | def on_llm_start(self, *args, **kwargs): 12 | print('start calling llm') 13 | 14 | def on_rag_start(self, *args, **kwargs): 15 | print('start calling rag') 16 | 17 | def on_tool_start(self, *args, **kwargs): 18 | print('start calling tool') 19 | 20 | ... 
21 | bot = RolePlay(function_list=function_list, llm=llm_config, instruction=role_template, callbacks=[SimpleCallback()], stream=False) 22 | 23 | bot.run('xxx') 24 | ``` 25 | 26 | ## RunStateCallback 27 | 28 | 我们提供了`RunStateCallback`,用于记录Agent执行过程中的中间状态,例如工具的调用/执行结果,RAG的召回结果等。如果使用了`RunStateCallback`,可以通过`run_states`属性获取中间调用的结果。一个可能的`run_states`格式如下: 29 | 30 | ```Python 31 | { 32 | # 每轮的run_state 33 | 1: 34 | [ 35 | # llm/tool/rag调用的具体结果 36 | RunState(type='llm', name='qwen-max', content='Action: RenewInstance\nAction Input: {"instance_id": "i-rj90a7e840y5cde", "period": "10"}\n', create_time=1720691946), 37 | RunState(type='tool_input', name='RenewInstance', content='{"instance_id": "i-rj90a7e840y5cde", "period": "10"}', create_time=1720691946), 38 | RunState(type='tool_output', name='RenewInstance', content="{'result': '已完成ECS实例ID为i-rj90a7e840y5cde的续费,续费时长10月'}", create_time=1720691946) 39 | ], 40 | 41 | 2: 42 | [ 43 | RunState(type='llm', name='qwen-max', content='已经完成了ECS实例ID为i-rj90a7e840y5cde的续费操作,续费时长为10个月。', create_time=1720691949) 44 | ] 45 | } 46 | ``` 47 | -------------------------------------------------------------------------------- /docs/source/modules/retrieve.md: -------------------------------------------------------------------------------- 1 | # Retrieve模块 2 | 3 | ## 使用 Langchain VectorStore 实现长期记忆 4 | 我们已经通过简单地拼接历史记录实现了agent的短期记忆。然而,对于长期记忆,我们可以引入`langchain.vectorstores`和`langchain.embeddings`。这些组件被封装在`Retrieval`模块中。 5 | 6 | 在`Retrieval`模块中,我们从 Retrieval 类派生了两个类: 7 | 8 | - `ToolRetrieval`:这个类用于工具检索。尽管工具的总数可能很多,但对于特定任务而言,只有3-5个工具可能是相关的。`ToolRetrieval`类负责筛选所有的工具。 9 | - `KnowledgeRetrieval`:这个类用于构建本地知识库。对于某些任务,可能需要特定领域的相关知识。`KnowledgeRetrieval`类负责检索与给定任务相关的领域知识。 10 | 11 | 12 | 我们使用`DashScopeEmbeddings`和`FAISS`作为默认的嵌入和向量存储。但您可以轻松指定您想要使用的嵌入和向量存储。 13 | 14 | ```Python 15 | class Retrieval: 16 | def __init__(self, 17 | embedding: Embeddings = None, 18 | vs_cls: VectorStore = None, 19 | top_k: int = 5, 20 | vs_params: Dict = {}): 21 | self.embedding = embedding or DashScopeEmbeddings( 22 | model='text-embedding-v1') 23 | self.top_k = top_k 24 | self.vs_cls = vs_cls or FAISS 25 | self.vs_params = vs_params 26 | self.vs = None 27 | 28 | def construct(self, docs): 29 | assert len(docs) > 0 30 | if isinstance(docs[0], str): 31 | self.vs = self.vs_cls.from_texts(docs, self.embedding, 32 | **self.vs_params) 33 | elif isinstance(docs[0], Document): 34 | self.vs = self.vs_cls.from_documents(docs, self.embedding, 35 | **self.vs_params) 36 | ``` 37 | -------------------------------------------------------------------------------- /docs/source/training_llms/train.md: -------------------------------------------------------------------------------- 1 | # 训练 2 | 3 | 我们在ModelScope发布了一个工具数据集,用于LLM的微调训练和评估([工具数据集](https://www.modelscope.cn/datasets/modelscope/ms_hackathon_23_agent_train_dev/summary))。 提供了对应的基于 **ModelScope Library** 的训练脚本。 4 | 5 | ## 训练选项 6 | 7 | 训练脚本支持多种训练方法,可根据您的可用资源进行选择: 8 | 9 | - 使用全参数或 Lora 进行微调。 10 | - 使用集成的 ModelScope DeepspeedHook 进行分布式训练。 11 | 12 | ## 数据预处理 13 | 14 | 一条数据的格式如下所示。 由于我们使用文本生成任务方案来训练LLM,因此需要对原始数据进行预处理。 15 | 16 | ```Python 17 | System: system info(agent info, tool info...). 18 | User: user inputs. 19 | Assistants: 20 | # agent call 21 | <|startofthink|>...<|endofthink|>\n\n 22 | # tool execute 23 | <|startofexec|>...<|endofexec|>\n 24 | # summarize 25 | ... 
26 | # may be multiple rounds 27 | ``` 28 | 29 | - 每个数据实例由三个角色组成:system、user和assistant。 LLM应该只关注**assistant**部分。 30 | - **assistant**部分通常由三个部分组成。 LLM应该只考虑agent通话的内容和最终总结。 31 | - 其他不必要的部分使用`IGNORE_INDEX`进行屏蔽,以将它们排除在损失计算之外。 32 | 33 | 34 | ## 训练脚本 35 | 36 | 使用脚本`run_train_ddp.sh`拉起训练。 37 | 38 | ```Shell 39 | CUDA_VISIBLE_DEVICES=0 \ 40 | python llm_sft.py \ 41 | --model_type modelscope-agent-7b \ 42 | --sft_type lora \ 43 | --output_dir runs \ 44 | --dataset damo/MSAgent-Bench \ 45 | --dataset_sample 20000 \ 46 | --dataset_test_ratio 0.02 \ 47 | --max_length 2048 \ 48 | --dtype bf16 \ 49 | --lora_rank 8 \ 50 | --lora_alpha 32 \ 51 | --lora_dropout_p 0.1 \ 52 | --batch_size 1 \ 53 | --learning_rate 1e-4 \ 54 | --gradient_accumulation_steps 16 \ 55 | --eval_steps 50 \ 56 | --save_steps 50 \ 57 | --save_total_limit 2 \ 58 | --logging_steps 20 \ 59 | --use_flash_attn true \ 60 | ``` 61 | 62 | ## 评估 63 | 64 | 训练结束后,我们还提供了一个评估脚本来判断 agent 在测试数据集上的表现。 测试数据集的 ground truth 来自于。。。。。。 65 | 66 | 评估指标包括: 67 | - 工具名称和参数的准确率。 68 | - 摘要相似度指标`Rouge-l`。 69 | -------------------------------------------------------------------------------- /docs/source_en/.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | # .readthedocs.yaml 2 | # Read the Docs configuration file 3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 4 | 5 | # Required 6 | version: 2 7 | 8 | # Set the OS, Python version and other tools you might need 9 | build: 10 | os: ubuntu-22.04 11 | tools: 12 | python: "3.12" 13 | 14 | # Build documentation in the "docs/" directory with Sphinx 15 | sphinx: 16 | configuration: docs/source_en/conf.py 17 | 18 | # Optionally build your docs in additional formats such as PDF and ePub 19 | # formats: 20 | # - pdf 21 | # - epub 22 | 23 | # Optional but recommended, declare the Python requirements required 24 | # to build your documentation 25 | # See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html 26 | python: 27 | install: 28 | - requirements: requirements/docs.txt 29 | - requirements: requirements.txt 30 | -------------------------------------------------------------------------------- /docs/source_en/get_started/installation.md: -------------------------------------------------------------------------------- 1 | # Installation 2 | 3 | ## conda 4 | One can set up local ModelScope-agent environment using pip and conda. We suggest anaconda or miniconda for creating local python environment: 5 | ```shell 6 | conda create -n ms_agent python=3.10 7 | conda activate ms_agent 8 | ``` 9 | clone repo and install dependency: 10 | ```shell 11 | git clone https://github.com/modelscope/modelscope-agent.git 12 | cd modelscope-agent 13 | pip install -r requirements.txt 14 | 15 | # set pwd to PYTHONPATH 16 | export PYTHONPATH=$PYTHONPATH:`pwd` 17 | ``` 18 | -------------------------------------------------------------------------------- /docs/source_en/index.rst: -------------------------------------------------------------------------------- 1 | .. modelscope-agent documentation file, 2 | You can adapt this file completely to your liking, but it should at least 3 | contain the root `toctree` directive. 4 | 5 | Modelscope-Agent DOCUMENTATION 6 | ============================== 7 | 8 | .. toctree:: 9 | :maxdepth: 2 10 | :caption: Get Started 11 | 12 | get_started/installation.md 13 | get_started/introduction.md 14 | get_started/quickstart.md 15 | 16 | 17 | .. 
toctree:: 18 | :maxdepth: 2 19 | :caption: Module Overview 20 | 21 | modules/llm.md 22 | modules/tool.md 23 | modules/memory.md 24 | modules/retrieve.md 25 | modules/callback.md 26 | 27 | .. toctree:: 28 | :maxdepth: 2 29 | :caption: Tools Contribution 30 | 31 | contributing/tool_contribution_guide.md 32 | 33 | 34 | .. toctree:: 35 | :maxdepth: 2 36 | :caption: LLM Application 37 | 38 | training_llms/train.md 39 | training_llms/train_agentfabric_llm_tool_use.md 40 | llms/qwen2_tool_calling.md 41 | llms/llama3.1_tool_calling.md 42 | 43 | .. toctree:: 44 | :maxdepth: 2 45 | :caption: Use cases 46 | 47 | use_cases/application.md 48 | use_cases/code_interpreter_case.md 49 | use_cases/llama3_for_agent.md 50 | use_cases/openAPI_for_agent.md 51 | deployment/local_deploy.md 52 | 53 | .. toctree:: 54 | :maxdepth: 2 55 | :caption: Agents 56 | 57 | agents/data_science_assistant.md 58 | 59 | 60 | Indices and tables 61 | ================== 62 | * :ref:`genindex` 63 | * :ref:`modindex` 64 | * :ref:`search` 65 | -------------------------------------------------------------------------------- /examples/agents/multi-agents/simple_chat_with_local.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import time 4 | 5 | from modelscope_agent import create_component 6 | from modelscope_agent.agents import RolePlay 7 | from modelscope_agent.task_center import TaskCenter 8 | 9 | REMOTE_MODE = False 10 | 11 | llm_config = { 12 | 'model': 'qwen-max', 13 | 'api_key': os.getenv('DASHSCOPE_API_KEY'), 14 | 'model_server': 'dashscope' 15 | } 16 | function_list = [] 17 | 18 | task_center = create_component( 19 | TaskCenter, name='task_center', remote=REMOTE_MODE) 20 | 21 | logging.warning(msg=f'time:{time.time()} done create task center') 22 | 23 | role_play1 = create_component( 24 | RolePlay, 25 | name='role_play1', 26 | remote=REMOTE_MODE, 27 | llm=llm_config, 28 | function_list=function_list) 29 | 30 | role_play2 = create_component( 31 | RolePlay, 32 | name='role_play2', 33 | remote=REMOTE_MODE, 34 | llm=llm_config, 35 | function_list=function_list) 36 | 37 | task_center.add_agents([role_play1, role_play2]) 38 | 39 | n_round = 2 40 | task = 'who are u' 41 | task_center.send_task_request(task) 42 | while n_round > 0: 43 | 44 | for frame in task_center.step(): 45 | print(frame) 46 | 47 | time.sleep(3) 48 | 49 | n_round -= 1 50 | -------------------------------------------------------------------------------- /examples/agents/multi-agents/simple_chat_with_ray.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import time 4 | 5 | import ray 6 | from modelscope_agent import create_component 7 | from modelscope_agent.agents import RolePlay 8 | from modelscope_agent.multi_agents_utils.executors.ray import RayTaskExecutor 9 | from modelscope_agent.task_center import TaskCenter 10 | 11 | REMOTE_MODE = True 12 | 13 | if REMOTE_MODE: 14 | RayTaskExecutor.init_ray() 15 | 16 | llm_config = { 17 | 'model': 'qwen-max', 18 | 'api_key': os.getenv('DASHSCOPE_API_KEY'), 19 | 'model_server': 'dashscope' 20 | } 21 | function_list = [] 22 | 23 | task_center = create_component( 24 | TaskCenter, name='task_center', remote=REMOTE_MODE) 25 | 26 | logging.warning(msg=f'time:{time.time()} done create task center') 27 | 28 | role_play1 = create_component( 29 | RolePlay, 30 | name='role_play1', 31 | remote=REMOTE_MODE, 32 | llm=llm_config, 33 | function_list=function_list) 34 | 35 | role_play2 = 
create_component( 36 | RolePlay, 37 | name='role_play2', 38 | remote=REMOTE_MODE, 39 | llm=llm_config, 40 | function_list=function_list) 41 | 42 | ray.get(task_center.add_agents.remote([role_play1, role_play2])) 43 | 44 | n_round = 2 45 | task = 'who are u' 46 | ray.get(task_center.send_task_request.remote(task)) 47 | while n_round > 0: 48 | 49 | for frame in task_center.step.remote(): 50 | print(ray.get(frame)) 51 | 52 | time.sleep(3) 53 | 54 | n_round -= 1 55 | 56 | ray.shutdown() 57 | -------------------------------------------------------------------------------- /examples/llms/finetune_llm/requirements.txt: -------------------------------------------------------------------------------- 1 | ms-swift 2 | -------------------------------------------------------------------------------- /examples/llms/finetune_llm/scripts/train/ds_stage_2.json: -------------------------------------------------------------------------------- 1 | { 2 | "fp16": { 3 | "enabled": "auto", 4 | "loss_scale": 0, 5 | "loss_scale_window": 1000, 6 | "initial_scale_power": 16, 7 | "hysteresis": 2, 8 | "min_loss_scale": 1 9 | }, 10 | "bf16": { 11 | "enabled": "auto" 12 | }, 13 | "train_micro_batch_size_per_gpu": "auto", 14 | "train_batch_size": "auto", 15 | "gradient_accumulation_steps": "auto", 16 | "zero_optimization": { 17 | "stage": 2, 18 | "allgather_bucket_size": 1e8, 19 | "overlap_comm": true, 20 | "reduce_bucket_size": 1e8, 21 | "contiguous_gradients": true, 22 | "sub_group_size": 1e9 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /examples/llms/finetune_llm/scripts/train/ds_stage_3.json: -------------------------------------------------------------------------------- 1 | { 2 | "fp16": { 3 | "enabled": "auto", 4 | "loss_scale": 0, 5 | "loss_scale_window": 1000, 6 | "initial_scale_power": 16, 7 | "hysteresis": 2, 8 | "min_loss_scale": 1 9 | }, 10 | "bf16": { 11 | "enabled": "auto" 12 | }, 13 | "train_micro_batch_size_per_gpu": "auto", 14 | "train_batch_size": "auto", 15 | "gradient_accumulation_steps": "auto", 16 | "zero_optimization": { 17 | "stage": 3, 18 | "overlap_comm": true, 19 | "contiguous_gradients": true, 20 | "sub_group_size": 1e9, 21 | "reduce_bucket_size": "auto", 22 | "stage3_prefetch_bucket_size": "auto", 23 | "stage3_param_persistence_threshold": "auto", 24 | "stage3_max_live_parameters": 1e9, 25 | "stage3_max_reuse_distance": 1e9, 26 | "stage3_gather_16bit_weights_on_model_save": true 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /examples/llms/finetune_llm/scripts/train/run_ms_agent_single.sh: -------------------------------------------------------------------------------- 1 | CUDA_VISIBLE_DEVICES=0 \ 2 | python llm_sft.py \ 3 | --model_type modelscope-agent-7b \ 4 | --sft_type lora \ 5 | --output_dir runs \ 6 | --dataset damo/MSAgent-Bench \ 7 | --dataset_sample 20000 \ 8 | --dataset_test_size 0.02 \ 9 | --max_length 2048 \ 10 | --dtype bf16 \ 11 | --lora_rank 8 \ 12 | --lora_alpha 32 \ 13 | --lora_dropout_p 0.1 \ 14 | --batch_size 1 \ 15 | --learning_rate 1e-4 \ 16 | --gradient_accumulation_steps 16 \ 17 | --eval_steps 50 \ 18 | --save_steps 50 \ 19 | --save_total_limit 2 \ 20 | --logging_steps 20 \ 21 | --use_flash_attn true \ 22 | -------------------------------------------------------------------------------- /examples/llms/finetune_llm/scripts/train/run_qwen_ddp.sh: -------------------------------------------------------------------------------- 1 | nproc_per_node=4 2 | 
CUDA_VISIBLE_DEVICES=0,1,2,3 \ 3 | torchrun \ 4 | --nproc_per_node=$nproc_per_node \ 5 | --master_port 29500 \ 6 | llm_sft.py \ 7 | --model_type qwen-7b \ 8 | --sft_type lora \ 9 | --output_dir runs \ 10 | --dataset train.jsonl \ 11 | --dataset_sample 20000 \ 12 | --dataset_test_size 0.02 \ 13 | --max_length 2048 \ 14 | --dtype bf16 \ 15 | --lora_rank 8 \ 16 | --lora_alpha 32 \ 17 | --lora_dropout_p 0.1 \ 18 | --batch_size 1 \ 19 | --learning_rate 1e-4 \ 20 | --eval_steps 50 \ 21 | --save_steps 50 \ 22 | --save_total_limit 2 \ 23 | --logging_steps 20 \ 24 | --use_flash_attn true \ 25 | --ddp_backend nccl \ 26 | --gradient_accumulation_steps $(expr 16 / $nproc_per_node) \ 27 | -------------------------------------------------------------------------------- /examples/llms/finetune_llm/scripts/train/run_qwen_ds_stage2.sh: -------------------------------------------------------------------------------- 1 | nproc_per_node=4 2 | CUDA_VISIBLE_DEVICES=0,1,2,3 \ 3 | torchrun \ 4 | --nproc_per_node=$nproc_per_node \ 5 | --master_port 29500 \ 6 | llm_sft.py \ 7 | --model_type qwen-7b \ 8 | --sft_type lora \ 9 | --output_dir runs \ 10 | --dataset test_v2_plugins_sample.json \ 11 | --dataset_sample -1 \ 12 | --max_length 2048 \ 13 | --lora_rank 8 \ 14 | --lora_alpha 32 \ 15 | --lora_dropout_p 0.1 \ 16 | --batch_size 1 \ 17 | --learning_rate 1e-4 \ 18 | --eval_steps 50 \ 19 | --save_steps 50 \ 20 | --save_total_limit 2 \ 21 | --logging_steps 10 \ 22 | --use_flash_attn false \ 23 | --ddp_backend nccl \ 24 | --gradient_accumulation_steps $(expr 16 / $nproc_per_node) \ 25 | --deepspeed 'scripts/train/ds_stage_2.json' \ 26 | --save_only_model true \ 27 | -------------------------------------------------------------------------------- /examples/llms/finetune_llm/scripts/train/run_qwen_ds_stage3.sh: -------------------------------------------------------------------------------- 1 | nproc_per_node=4 2 | CUDA_VISIBLE_DEVICES=0,1,2,3 \ 3 | torchrun \ 4 | --nproc_per_node=$nproc_per_node \ 5 | --master_port 29500 \ 6 | llm_sft.py \ 7 | --model_type qwen-7b \ 8 | --sft_type lora \ 9 | --output_dir runs \ 10 | --dataset test_v2_plugins_sample.json \ 11 | --dataset_sample -1 \ 12 | --max_length 2048 \ 13 | --lora_rank 8 \ 14 | --lora_alpha 32 \ 15 | --lora_dropout_p 0.1 \ 16 | --batch_size 1 \ 17 | --learning_rate 1e-4 \ 18 | --eval_steps 50 \ 19 | --save_steps 50 \ 20 | --save_total_limit 2 \ 21 | --logging_steps 10 \ 22 | --use_flash_attn false \ 23 | --ddp_backend nccl \ 24 | --gradient_accumulation_steps $(expr 16 / $nproc_per_node) \ 25 | --deepspeed 'scripts/train/ds_stage_3.json' \ 26 | --save_only_model true \ 27 | -------------------------------------------------------------------------------- /examples/llms/finetune_llm/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .dataset import (get_ms_tool_dataset, get_ms_tool_dataset_test, 2 | process_dataset, tokenize_function) 3 | from .models import MODEL_MAPPING, get_model_tokenizer 4 | from .utils import (DEFAULT_PROMPT, broadcast_string, data_collate_fn, 5 | evaluate, find_all_linear_for_lora, get_dist_setting, 6 | inference, is_dist, plot_images, select_bnb, select_dtype, 7 | show_layers) 8 | -------------------------------------------------------------------------------- /examples/llms/multi_llm_with_alpha_umi/run_deploy.sh: -------------------------------------------------------------------------------- 1 | export 
PYTHONPATH=./ 2 | 3 | export VLLM_USE_MODELSCOPE=True 4 | python -m vllm.entrypoints.openai.api_server \ 5 | --model=iic/alpha-umi-planner-7b \ 6 | --revision=v1.0.0 --trust-remote-code \ 7 | --port 8090 \ 8 | --dtype float16 \ 9 | --gpu-memory-utilization 0.3 > planner.log & 10 | 11 | python -m vllm.entrypoints.openai.api_server \ 12 | --model=iic/alpha-umi-caller-7b \ 13 | --revision=v1.0.0 --trust-remote-code \ 14 | --port 8091 \ 15 | --dtype float16 \ 16 | --gpu-memory-utilization 0.3 > caller.log & 17 | 18 | python -m vllm.entrypoints.openai.api_server \ 19 | --model=iic/alpha-umi-summarizer-7b \ 20 | --revision=v1.0.0 --trust-remote-code \ 21 | --port 8092 \ 22 | --dtype float16 \ 23 | --gpu-memory-utilization 0.3 > summarizer.log & 24 | -------------------------------------------------------------------------------- /examples/llms/multi_llm_with_alpha_umi/run_test.sh: -------------------------------------------------------------------------------- 1 | export PYTHONPATH=./ 2 | export RAPID_API_TOKEN="your rapid api token here" 3 | export MODELSCOPE_API_TOKEN="your modelscope api token here" 4 | 5 | python examples/llms/multi_llm_with_alpha_umi/test_alpha_umi.py 6 | -------------------------------------------------------------------------------- /examples/llms/multi_llm_with_alpha_umi/test_alpha_umi.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | 4 | from modelscope_agent.agents.alpha_umi import AlphaUmi 5 | from openai import OpenAI 6 | 7 | llm_configs = { 8 | 'planner_llm_config': { 9 | 'model': 'iic/alpha-umi-planner-7b', 10 | 'model_server': 'openai', 11 | 'api_base': 'http://localhost:8090/v1', 12 | 'is_chat': False 13 | }, 14 | 'caller_llm_config': { 15 | 'model': 'iic/alpha-umi-caller-7b', 16 | 'model_server': 'openai', 17 | 'api_base': 'http://localhost:8091/v1', 18 | 'is_chat': False 19 | }, 20 | 'summarizer_llm_config': { 21 | 'model': 'iic/alpha-umi-summarizer-7b', 22 | 'model_server': 'openai', 23 | 'api_base': 'http://localhost:8092/v1', 24 | 'is_chat': False 25 | }, 26 | } 27 | 28 | 29 | def test_alpha_umi(): 30 | function_list = [ 31 | 'get_data_fact_for_numbers', 'get_math_fact_for_numbers', 32 | 'get_year_fact_for_numbers', 'listquotes_for_current_exchange', 33 | 'exchange_for_current_exchange' 34 | ] 35 | 36 | bot = AlphaUmi( 37 | function_list=function_list, 38 | llm_planner=llm_configs['planner_llm_config'], 39 | llm_caller=llm_configs['caller_llm_config'], 40 | llm_summarizer=llm_configs['summarizer_llm_config'], 41 | ) 42 | 43 | response = bot.run('how many CNY can I exchange for 1 US dollar? 
\ 44 | also, give me a special property about the number of CNY after exchange' 45 | ) 46 | 47 | for chunk in response: 48 | print(chunk) 49 | 50 | 51 | if __name__ == '__main__': 52 | 53 | test_alpha_umi() 54 | -------------------------------------------------------------------------------- /examples/mcp_apps/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "mcpServers": { 3 | "amap-maps": { 4 | "type": "sse", 5 | "url": "https://mcp.api-inference.modelscope.cn/sse/6d2f001a63354d" 6 | }, 7 | "MiniMax-MCP": { 8 | "type": "sse", 9 | "url": "https://mcp.api-inference.modelscope.cn/sse/0c73b1853bab4b" 10 | }, 11 | "edgeone-pages-mcp-server": { 12 | "command": "npx", 13 | "args": ["edgeone-pages-mcp"] 14 | }, 15 | "notebook": {} 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /modelscope_agent/__init__.py: -------------------------------------------------------------------------------- 1 | from .agent import Agent 2 | 3 | 4 | def _create_remote(cls, 5 | name, 6 | max_concurrency=1, 7 | force_new=False, 8 | *args, 9 | **kwargs): 10 | ''' 11 | Create a remote actor by ray 12 | Args: 13 | cls: the class to be created 14 | name: the name of ray actor, also the role name 15 | max_concurrency: max concurrency of the actor 16 | force_new: force to create a new actor 17 | *args: 18 | **kwargs: 19 | 20 | Returns: 21 | 22 | ''' 23 | import ray 24 | try: 25 | # try to get an existing actor 26 | ray_actor = ray.get_actor(name) 27 | if force_new: 28 | ray.kill(ray_actor) 29 | else: 30 | return ray_actor 31 | except ValueError: 32 | pass 33 | # if failed, create a new actor 34 | return ray.remote( 35 | name=name, 36 | max_concurrency=max_concurrency)(cls).remote(*args, **kwargs) 37 | 38 | 39 | def _create_local(cls, *args, **kwargs): 40 | ''' 41 | Create a local object 42 | Args: 43 | cls: the class to be created 44 | *args: 45 | **kwargs: 46 | 47 | Returns: 48 | 49 | ''' 50 | return cls(*args, **kwargs) 51 | 52 | 53 | def create_component(cls, 54 | name, 55 | remote=False, 56 | max_concurrency=1, 57 | prefix_name=None, 58 | *args, 59 | **kwargs): 60 | kwargs['remote'] = remote 61 | kwargs['role'] = name 62 | kwargs['prefix_name'] = prefix_name 63 | if remote: 64 | if prefix_name is not None: 65 | name = f'{prefix_name}_{name}' 66 | return _create_remote(cls, name, max_concurrency, *args, **kwargs) 67 | else: 68 | return _create_local(cls, *args, **kwargs) 69 | -------------------------------------------------------------------------------- /modelscope_agent/agents/__init__.py: -------------------------------------------------------------------------------- 1 | from .agent_builder import AgentBuilder 2 | from .role_play import RolePlay 3 | -------------------------------------------------------------------------------- /modelscope_agent/agents/codexgraph_agent/__init__.py: -------------------------------------------------------------------------------- 1 | from .cypher_agent import CypherAgent 2 | from .task.code_chat import CodexGraphAgentChat 3 | from .task.code_debugger import CodexGraphAgentDebugger 4 | from .task.code_general import (CodexGraphAgentCommenter, 5 | CodexGraphAgentGeneral, 6 | CodexGraphAgentGenerator, 7 | CodexGraphAgentUnitTester) 8 | -------------------------------------------------------------------------------- /modelscope_agent/agents/codexgraph_agent/prompt.py: -------------------------------------------------------------------------------- 1 | CYPHER_PROMPT = 
"""[start_of_cypher_queries] 2 | ### Query 1 3 | **decomposed text query**: 4 | ```cypher 5 | 6 | ``` 7 | 8 | ### Query 2 9 | **decomposed text query**: 10 | ```cypher 11 | 12 | ``` 13 | ... 14 | 15 | ### Query n 16 | **decomposed text query**: 17 | ```cypher 18 | 19 | ``` 20 | [end_of_cypher_queries] 21 | """ 22 | 23 | JSON_PROMPT = """```json 24 | {{"thought": $THOUGHT, "action": $ACTION_NAME, "action_input": $INPUT}} 25 | ``` 26 | """ 27 | -------------------------------------------------------------------------------- /modelscope_agent/agents/codexgraph_agent/task/__init__.py: -------------------------------------------------------------------------------- 1 | from .code_general import CodexGraphAgentGeneral 2 | -------------------------------------------------------------------------------- /modelscope_agent/agents/codexgraph_agent/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/modelscope_agent/agents/codexgraph_agent/utils/__init__.py -------------------------------------------------------------------------------- /modelscope_agent/agents/codexgraph_agent/utils/cypher_utils.py: -------------------------------------------------------------------------------- 1 | import re 2 | 3 | 4 | def add_label_to_nodes(cypher_query, new_label): 5 | split_query = cypher_query.split('RETURN', 1) 6 | 7 | if len(split_query) < 2: 8 | match_part = split_query[0] 9 | return_part = '' 10 | else: 11 | match_part = split_query[0] 12 | return_part = 'RETURN' + split_query[1] 13 | 14 | pattern = r'(\(\s*\w*\s*)(:[^{}()]*)?(\{[^{}()]*\})?\)' 15 | 16 | def replace_label(match): 17 | before_label = match.group(1) 18 | existing_labels = match.group(2) if match.group(2) else '' 19 | properties = match.group(3) if match.group(3) else '' 20 | 21 | if new_label not in existing_labels: 22 | if existing_labels: 23 | new_labels = f':{new_label}{existing_labels}' 24 | else: 25 | new_labels = f':{new_label}' 26 | else: 27 | new_labels = existing_labels 28 | 29 | return f'{before_label}{new_labels}{properties})' 30 | 31 | updated_match_part = re.sub(pattern, replace_label, match_part) 32 | updated_query = updated_match_part + return_part 33 | 34 | return updated_query 35 | 36 | 37 | def extract_cypher_queries(given_text): 38 | pattern = re.compile(r'```cypher(.*?)```', re.DOTALL) 39 | return [match.strip() for match in pattern.findall(given_text)] 40 | -------------------------------------------------------------------------------- /modelscope_agent/agents/gen_keyword.py: -------------------------------------------------------------------------------- 1 | from modelscope_agent.agent import Agent 2 | 3 | PROMPT_TEMPLATE_ZH = """ 4 | 请提取问题中的关键词,需要中英文均有,可以适量补充不在问题中但相关的关键词。关键词尽量切分为动词/名词/形容词等类型,不要长词组。 5 | 关键词以JSON的格式给出,比如{{"keywords_zh": ["关键词1", "关键词2"], "keywords_en": ["keyword 1", "keyword 2"]}} 6 | 7 | 8 | Question:这篇文章的作者是谁? 9 | Keywords:{{"keywords_zh": ["作者"], "keywords_en": ["author"]}} 10 | 11 | Question:解释下图一 12 | Keywords:{{"keywords_zh": ["图一", "图 1"], "keywords_en": ["Figure 1"]}} 13 | 14 | Question:核心公式 15 | Keywords:{{"keywords_zh": ["核心公式", "公式"], "keywords_en": ["core formula", "formula", "equation"]}} 16 | 17 | Question:{user_request} 18 | Keywords: 19 | """ 20 | 21 | PROMPT_TEMPLATE_EN = """Please extract keywords from the question, both in Chinese and English, 22 | and supplement them appropriately with relevant keywords that are not in the question. 
23 | Try to divide keywords into verb/noun/adjective types and avoid long phrases. Keywords are provided in JSON format, 24 | such as {{"keywords_zh": ["关键词1", "关键词2"], "keywords_en": ["keyword 1", "keyword 2"]}} 25 | 26 | Question: Who are the authors of this article? 27 | Keywords:{{"keywords_zh": ["作者"], "keywords_en": ["author"]}} 28 | 29 | Question: Explain Figure 1 30 | Keywords:{{"keywords_zh": ["图一", "图 1"], "keywords_en": ["Figure 1"]}} 31 | 32 | Question: core formula 33 | Keywords:{{"keywords_zh": ["核心公式", "公式"], "keywords_en": ["core formula", "formula", "equation"]}} 34 | 35 | Question:{user_request} 36 | Keywords: 37 | """ 38 | 39 | PROMPT_TEMPLATE = { 40 | 'zh': PROMPT_TEMPLATE_ZH, 41 | 'en': PROMPT_TEMPLATE_EN, 42 | } 43 | 44 | 45 | class GenKeyword(Agent): 46 | 47 | def _run(self, user_request, lang: str = 'en'): 48 | prompt = PROMPT_TEMPLATE[lang].format(user_request=user_request, ) 49 | return self._call_llm(prompt) 50 | -------------------------------------------------------------------------------- /modelscope_agent/agents/mobile_agent_v2/__init__.py: -------------------------------------------------------------------------------- 1 | from .mobile_agent_v2 import MobileAgentV2 2 | -------------------------------------------------------------------------------- /modelscope_agent/callbacks/__init__.py: -------------------------------------------------------------------------------- 1 | from .base import BaseCallback, CallbackManager 2 | -------------------------------------------------------------------------------- /modelscope_agent/constants.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | from pathlib import Path 3 | 4 | DEFAULT_AGENT_ROOT = Path.home() / '.modelscope-agent' 5 | DEFAULT_LOG_STORAGE_PATH = DEFAULT_AGENT_ROOT / 'log' 6 | DEFAULT_SEND_TO = 'all' 7 | USER_REQUIREMENT = 'user_requirement' 8 | ENVIRONMENT_NAME = 'env' 9 | AGENT_REGISTRY_NAME = 'agent_center' 10 | TASK_CENTER_NAME = 'task_center' 11 | DEFAULT_TOOL_MANAGER_SERVICE_URL = 'http://localhost:31511' 12 | DEFAULT_ASSISTANT_SERVICE_URL = 'http://localhost:31512' 13 | MODELSCOPE_AGENT_TOKEN_HEADER_NAME = 'X-Modelscope-Agent-Token' 14 | DEFAULT_CODE_INTERPRETER_DIR = '/tmp/ci_workspace' 15 | LOCAL_FILE_PATHS = 'local_file_paths' 16 | BASE64_FILES = 'base64_files' 17 | 18 | 19 | class ApiNames(Enum): 20 | dashscope_api_key = 'DASHSCOPE_API_KEY' 21 | modelscope_api_key = 'MODELSCOPE_API_TOKEN' 22 | amap_api_key = 'AMAP_TOKEN' 23 | bing_api_key = 'BING_SEARCH_V7_SUBSCRIPTION_KEY' 24 | zhipu_api_key = 'ZHIPU_API_KEY' 25 | -------------------------------------------------------------------------------- /modelscope_agent/environment/__init__.py: -------------------------------------------------------------------------------- 1 | from .environment import Environment 2 | -------------------------------------------------------------------------------- /modelscope_agent/environment/android_adb/__init__.py: -------------------------------------------------------------------------------- 1 | from .android_adb_env import ADBEnvironment 2 | -------------------------------------------------------------------------------- /modelscope_agent/environment/graph_database/__init__.py: -------------------------------------------------------------------------------- 1 | from .graph_database import GraphDatabaseHandler 2 | -------------------------------------------------------------------------------- 
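[Editor's note] A minimal usage sketch (not part of the repository) for the GenKeyword agent defined above; it assumes the Agent base class accepts the same llm config dict as the RolePlay examples and that DASHSCOPE_API_KEY is set in the environment:

```python
from modelscope_agent.agents.gen_keyword import GenKeyword

# hypothetical wiring; GenKeyword streams its reply like the other agents
bot = GenKeyword(llm={'model': 'qwen-max', 'model_server': 'dashscope'})
for chunk in bot.run('Explain the core formula', lang='en'):
    print(chunk, end='')
```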
/modelscope_agent/environment/graph_database/ast_search/__init__.py: -------------------------------------------------------------------------------- 1 | from .ast_manage import AstManager 2 | -------------------------------------------------------------------------------- /modelscope_agent/environment/graph_database/indexer/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/modelscope_agent/environment/graph_database/indexer/__init__.py -------------------------------------------------------------------------------- /modelscope_agent/environment/graph_database/indexer/sourcetraildb.py: -------------------------------------------------------------------------------- 1 | DEFINITION_IMPLICIT = 1 2 | DEFINITION_EXPLICIT = 2 3 | SYMBOL_TYPE = 3 4 | SYMBOL_BUILTIN_TYPE = 4 5 | SYMBOL_MODULE = 5 6 | SYMBOL_NAMESPACE = 6 7 | SYMBOL_PACKAGE = 7 8 | SYMBOL_STRUCT = 8 9 | SYMBOL_CLASS = 9 10 | SYMBOL_INTERFACE = 10 11 | SYMBOL_ANNOTATION = 11 12 | SYMBOL_GLOBAL_VARIABLE = 12 13 | SYMBOL_FIELD = 13 14 | SYMBOL_FUNCTION = 14 15 | SYMBOL_METHOD = 15 16 | SYMBOL_ENUM = 16 17 | SYMBOL_ENUM_CONSTANT = 17 18 | SYMBOL_TYPEDEF = 18 19 | SYMBOL_TYPE_PARAMETER = 19 20 | SYMBOL_MACRO = 20 21 | SYMBOL_UNION = 21 22 | REFERENCE_TYPE_USAGE = 22 23 | REFERENCE_USAGE = 23 24 | REFERENCE_CALL = 24 25 | REFERENCE_INHERITANCE = 25 26 | REFERENCE_OVERRIDE = 26 27 | REFERENCE_TYPE_ARGUMENT = 27 28 | REFERENCE_TEMPLATE_SPECIALIZATION = 28 29 | REFERENCE_INCLUDE = 29 30 | REFERENCE_IMPORT = 30 31 | REFERENCE_MACRO_USAGE = 31 32 | REFERENCE_ANNOTATION_USAGE = 32 33 | -------------------------------------------------------------------------------- /modelscope_agent/llm/__init__.py: -------------------------------------------------------------------------------- 1 | import re 2 | 3 | from .base import LLM_REGISTRY, BaseChatModel 4 | from .dashscope import DashScopeLLM, QwenChatAtDS 5 | from .modelscope import ModelScopeChatGLM, ModelScopeLLM 6 | from .ollama import OllamaLLM 7 | from .openai import OpenAi 8 | from .openai_fn_call import TextChatAtOAI 9 | from .vllm import VllmLLM 10 | from .zhipu import ZhipuLLM 11 | 12 | 13 | def get_chat_model(model: str, model_server: str, **kwargs) -> BaseChatModel: 14 | """ 15 | model: the model name, such as qwen-max, gpt-4 ... 16 | model_server: the source of the model, such as dashscope, openai, modelscope ... 17 | **kwargs: more parameters, such as api_key, api_base 18 | """ 19 | model_type = re.split(r'[-/_]', model)[0] # parse qwen / gpt / ... 
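# (editor's note) For example, model='qwen-max' with model_server='dashscope'
# gives model_type='qwen', so the lookup below presumably tries the key
# 'dashscope_qwen' first and then falls back to the plain 'dashscope' entry.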
20 | registered_model_id = f'{model_server}_{model_type}' 21 | 22 | if registered_model_id in LLM_REGISTRY: # specific model from specific source 23 | return LLM_REGISTRY[registered_model_id](model, model_server, **kwargs) 24 | elif model_server in LLM_REGISTRY: # specific source 25 | return LLM_REGISTRY[model_server](model, model_server, **kwargs) 26 | else: 27 | raise NotImplementedError 28 | 29 | 30 | __all__ = [ 31 | 'LLM_REGISTRY', 'BaseChatModel', 'OpenAi', 'DashScopeLLM', 'QwenChatAtDS', 32 | 'ModelScopeLLM', 'ModelScopeChatGLM', 'ZhipuLLM', 'OllamaLLM', 'VllmLLM' 33 | ] 34 | -------------------------------------------------------------------------------- /modelscope_agent/llm/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/modelscope_agent/llm/utils/__init__.py -------------------------------------------------------------------------------- /modelscope_agent/memory/__init__.py: -------------------------------------------------------------------------------- 1 | from .base import Memory 2 | from .memory_with_rag import MemoryWithRag 3 | from .memory_with_retrieval_knowledge import MemoryWithRetrievalKnowledge 4 | -------------------------------------------------------------------------------- /modelscope_agent/multi_agents_utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/modelscope_agent/multi_agents_utils/__init__.py -------------------------------------------------------------------------------- /modelscope_agent/multi_agents_utils/executors/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/modelscope_agent/multi_agents_utils/executors/__init__.py -------------------------------------------------------------------------------- /modelscope_agent/rag/__init__.py: -------------------------------------------------------------------------------- 1 | from modelscope_agent.utils.nltk_utils import install_nltk_data 2 | 3 | # install nltk data 4 | 5 | install_nltk_data() 6 | -------------------------------------------------------------------------------- /modelscope_agent/rag/base.py: -------------------------------------------------------------------------------- 1 | import os 2 | from typing import Any, Dict, List, Union 3 | 4 | from llama_index.core import SimpleDirectoryReader, VectorStoreIndex 5 | from llama_index.core.llama_pack.base import BaseLlamaPack 6 | from llama_index.core.readers.base import BaseReader 7 | 8 | 9 | class Knowledge(BaseLlamaPack): 10 | """ rag pipeline. 11 | 12 | Load knowledge from different sources; supported: a directory path (str), a list of file paths (list), or a dict that maps each source to its own retrieval strategy. 13 | Automatically select the best file reader given file extensions. 14 | 15 | Args: 16 | knowledge_source: Path to a directory, a list of file paths, or a dict of file paths with their retrieval strategies. 17 | cache_dir: Directory used to cache the indexed data. 18 | """ 19 | 20 | def __init__(self, 21 | knowledge_source: Union[List, str, Dict], 22 | cache_dir: str = './run', 23 | **kwargs) -> None: 24 | 25 | # extra_readers = self.get_extra_readers() 26 | self.documents = [] 27 | if isinstance(knowledge_source, str): 28 | if os.path.exists(knowledge_source): 29 | self.documents = SimpleDirectoryReader( 30 | input_dir=knowledge_source, 31 | recursive=True).load_data() 32 | elif isinstance(knowledge_source, list): 33 | # a list is treated as individual files to read 34 | self.documents = SimpleDirectoryReader( 35 | input_files=knowledge_source).load_data() 36 | 37 | def get_extra_readers(self) -> Dict[str, BaseReader]: 38 | return {} 39 | 40 | def get_modules(self) -> Dict[str, Any]: 41 | """Get modules for rewrite.""" 42 | return { 43 | 'node_parser': self.node_parser, 44 | 'recursive_retriever': self.recursive_retriever, 45 | 'query_engines': self.query_engines, 46 | 'reader': self.path_reader, 47 | } 48 | 49 | def run(self, query: str, **kwargs) -> str: 50 | return self.query_engine.query(query, **kwargs) 51 | -------------------------------------------------------------------------------- /modelscope_agent/rag/rag_template/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/modelscope_agent/rag/rag_template/__init__.py -------------------------------------------------------------------------------- /modelscope_agent/rag/rag_template/best_match.py: -------------------------------------------------------------------------------- 1 | from typing import Any, List, Optional 2 | 3 | from llama_index.core import StorageContext, load_index_from_storage 4 | from llama_index.core.base.base_retriever import BaseRetriever 5 | from llama_index.core.llms.llm import LLM 6 | from llama_index.core.node_parser import SentenceSplitter 7 | from llama_index.core.schema import Document 8 | from modelscope_agent.llm import get_chat_model 9 | from modelscope_agent.rag.knowledge import BaseKnowledge 10 | 11 | 12 | class BestMatchKnowledge(BaseKnowledge): 13 | 14 | def get_root_retriever(self, 15 | documents: List[Document], 16 | cache_dir: str, 17 | llm: LLM, 18 | chunk_size: int = 200, 19 | similarity_top_k=2, 20 | **kwargs) -> BaseRetriever: 21 | from llama_index.retrievers.bm25 import BM25Retriever 22 | 23 | self.splitter = SentenceSplitter(chunk_size=chunk_size) 24 | nodes = self.splitter.get_nodes_from_documents(documents) 25 | 26 | # initialize storage context (by default it's in-memory) 27 | storage_context = StorageContext.from_defaults(persist_dir=cache_dir) 28 | storage_context.docstore.add_documents(nodes) 29 | if cache_dir is not None: 30 | storage_context.persist(persist_dir=cache_dir) 31 | 32 | # We can pass in the index, docstore, or list of nodes to create the retriever 33 | return BM25Retriever.from_defaults( 34 | docstore=storage_context.docstore, 35 | similarity_top_k=similarity_top_k) 36 | 37 | 38 | if __name__ == '__main__': 39 | llm_config = {'model': 'qwen-max', 'model_server': 'dashscope'} 40 | llm = get_chat_model(**llm_config) 41 | 42 | knowledge = BestMatchKnowledge('./data/常见QA.pdf', llm=llm) 43 | 44 | print(knowledge.run('如何创建agent', files=[])) 45 | print('-----------------------') 46 | -------------------------------------------------------------------------------- /modelscope_agent/rag/reader/__init__.py: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/modelscope_agent/rag/reader/__init__.py -------------------------------------------------------------------------------- /modelscope_agent/storage/__init__.py: -------------------------------------------------------------------------------- 1 | from .base import BaseStorage 2 | from .file_storage import DocumentStorage 3 | from .vector_storage import KnowledgeVector, VectorStorage 4 | -------------------------------------------------------------------------------- /modelscope_agent/storage/base.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | 3 | 4 | class BaseStorage(ABC): 5 | 6 | @abstractmethod 7 | def add(self, *args, **kwargs): 8 | """add items to db or indexer""" 9 | pass 10 | 11 | @abstractmethod 12 | def search(self, *args, **kwargs): 13 | """search from db or indexer""" 14 | pass 15 | 16 | @abstractmethod 17 | def delete(self, *args, **kwargs): 18 | """delete data from db or indexer""" 19 | pass 20 | -------------------------------------------------------------------------------- /modelscope_agent/tools/code_interpreter/AlibabaPuHuiTi-3-45-Light.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/modelscope_agent/tools/code_interpreter/AlibabaPuHuiTi-3-45-Light.ttf -------------------------------------------------------------------------------- /modelscope_agent/tools/code_interpreter/__init__.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | from modelscope_agent.utils import _LazyModule 4 | 5 | _import_structure = { 6 | 'code_interpreter': ['CodeInterpreter'], 7 | } 8 | 9 | sys.modules[__name__] = _LazyModule( 10 | __name__, 11 | globals()['__file__'], 12 | _import_structure, 13 | module_spec=__spec__, 14 | ) 15 | -------------------------------------------------------------------------------- /modelscope_agent/tools/code_interpreter/code_interpreter_init_kernel.py: -------------------------------------------------------------------------------- 1 | import math # noqa 2 | import os # noqa 3 | import re # noqa 4 | import signal 5 | 6 | import json # noqa 7 | import matplotlib # noqa 8 | import matplotlib.pyplot as plt 9 | import numpy as np # noqa 10 | import pandas as pd # noqa 11 | import seaborn as sns 12 | from matplotlib.font_manager import FontProperties 13 | from sympy import Eq, solve, symbols # noqa 14 | 15 | 16 | def input(*args, **kwargs): # noqa 17 | raise NotImplementedError('Python input() function is disabled.') 18 | 19 | 20 | def _m6_timout_handler(_signum=None, _frame=None): 21 | raise TimeoutError('M6_CODE_INTERPRETER_TIMEOUT') 22 | 23 | 24 | try: 25 | signal.signal(signal.SIGALRM, _m6_timout_handler) 26 | except AttributeError: # windows 27 | pass 28 | 29 | 30 | class _M6CountdownTimer: 31 | 32 | @classmethod 33 | def start(cls, timeout: int): 34 | try: 35 | signal.alarm(timeout) 36 | except AttributeError: # windows 37 | pass # TODO: I haven't found a solution that works with jupyter yet. 
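# (editor's note) signal.alarm and SIGALRM exist only on Unix; on Windows the
# AttributeError fallbacks above and below mean the code-interpreter timeout
# is silently skipped rather than enforced.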
38 | 39 | @classmethod 40 | def cancel(cls): 41 | try: 42 | signal.alarm(0) 43 | except AttributeError: # windows 44 | pass # TODO 45 | 46 | 47 | sns.set_theme() 48 | 49 | _m6_font_prop = FontProperties(fname='{{M6_FONT_PATH}}') 50 | plt.rcParams['font.family'] = _m6_font_prop.get_name() 51 | -------------------------------------------------------------------------------- /modelscope_agent/tools/contrib/__init__.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | from modelscope_agent.utils import _LazyModule 4 | 5 | _import_structure = { 6 | 'demo': ['AliyunRenewInstanceTool'], 7 | } 8 | 9 | sys.modules[__name__] = _LazyModule( 10 | __name__, 11 | globals()['__file__'], 12 | _import_structure, 13 | module_spec=__spec__, 14 | ) 15 | -------------------------------------------------------------------------------- /modelscope_agent/tools/contrib/demo/README.md: -------------------------------------------------------------------------------- 1 | # Aliyun instance renewal tool 2 | 3 | This tool requires an Aliyun account; please create an Aliyun account first. 4 | -------------------------------------------------------------------------------- /modelscope_agent/tools/contrib/demo/__init__.py: -------------------------------------------------------------------------------- 1 | from .renew_aliyun_instance import AliyunRenewInstanceTool 2 | -------------------------------------------------------------------------------- /modelscope_agent/tools/contrib/demo/renew_aliyun_instance.py: -------------------------------------------------------------------------------- 1 | from modelscope_agent.tools.base import BaseTool, register_tool 2 | 3 | 4 | @register_tool('RenewInstance') 5 | class AliyunRenewInstanceTool(BaseTool): 6 | description = '续费一台包年包月ECS实例' 7 | name = 'RenewInstance' 8 | parameters: list = [{ 9 | 'name': 'instance_id', 10 | 'description': 'ECS实例ID', 11 | 'required': True, 12 | 'type': 'string' 13 | }, { 14 | 'name': 'period', 15 | 'description': '续费时长以月为单位', 16 | 'required': True, 17 | 'type': 'string' 18 | }] 19 | 20 | def call(self, params: str, **kwargs): 21 | params = self._verify_args(params) 22 | instance_id = params['instance_id'] 23 | period = params['period'] 24 | return str({'result': f'已完成ECS实例ID为{instance_id}的续费,续费时长{period}月'}) 25 | -------------------------------------------------------------------------------- /modelscope_agent/tools/contrib/demo/test_case.py: -------------------------------------------------------------------------------- 1 | # write your test case here 2 | -------------------------------------------------------------------------------- /modelscope_agent/tools/dashscope_tools/__init__.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | from modelscope_agent.utils import _LazyModule 4 | 5 | _import_structure = { 6 | 'image_enhancement': ['ImageEnhancement'], 7 | 'image_generation': ['TextToImageTool'], 8 | 'image_generation_lite': ['TextToImageLiteTool'], 9 | 'qwen_vl': ['QWenVL'], 10 | 'style_repaint': ['StyleRepaint'], 11 | 'wordart_tool': ['WordArtTexture'], 12 | 'sambert_tts_tool': ['SambertTtsTool'], 13 | 'paraformer_asr_tool': ['ParaformerAsrTool'] 14 | } 15 | 16 | sys.modules[__name__] = _LazyModule( 17 | __name__, 18 | globals()['__file__'], 19 | _import_structure, 20 | module_spec=__spec__, 21 | ) 22 | -------------------------------------------------------------------------------- /modelscope_agent/tools/hf_tool.py: 
-------------------------------------------------------------------------------- 1 | from typing import Dict, List 2 | 3 | import json 4 | from modelscope_agent.tools.base import BaseTool, register_tool 5 | 6 | 7 | @register_tool('hf-tool') 8 | class HFTool(BaseTool): 9 | """Simple wrapper for huggingface transformers tools 10 | 11 | """ 12 | 13 | def __init__(self, tool, description: str, name: str, 14 | parameters: List[Dict]): 15 | try: 16 | from transformers.tools import Tool as HFTool 17 | except ImportError: 18 | try: 19 | from transformers import Tool as HFTool 20 | except ImportError as e: 21 | raise ImportError( 22 | "The package 'transformers' is required for this module. Please install it using 'pip install " 23 | "transformers>=4.33'.") from e 24 | self.tool = tool 25 | self.description = description 26 | self.name = name 27 | self.parameters = parameters 28 | super().__init__() 29 | 30 | def call(self, params: str, **kwargs) -> str: 31 | params = self._verify_args(params) 32 | return json.dumps(self.tool(**params), ensure_ascii=False) 33 | -------------------------------------------------------------------------------- /modelscope_agent/tools/langchain_proxy_tool.py: -------------------------------------------------------------------------------- 1 | from copy import deepcopy 2 | 3 | from modelscope_agent.tools.base import BaseTool, register_tool 4 | 5 | 6 | @register_tool('langchain_tool') 7 | class LangchainTool(BaseTool): 8 | description = '通过调用langchain插件来支持对语言模型的输入输出格式进行处理,输入文本字符,输出经过格式处理的结果' 9 | name = 'plugin' 10 | parameters: list = [{ 11 | 'name': 'commands', 12 | 'description': '需要进行格式处理的文本字符列表', 13 | 'required': True, 14 | 'type': 'string' 15 | }] 16 | 17 | def __init__(self, langchain_tool): 18 | from langchain_community.tools import BaseTool 19 | 20 | if not isinstance(langchain_tool, BaseTool): 21 | raise ValueError('langchain_tool should be type of langchain tool') 22 | self.langchain_tool = langchain_tool 23 | self.parse_langchain_schema() 24 | super().__init__() 25 | 26 | def parse_langchain_schema(self): 27 | # convert langchain tool schema to modelscope_agent tool schema 28 | self.description = self.langchain_tool.description 29 | self.name = self.langchain_tool.name 30 | self.parameters = [] 31 | for name, arg in self.langchain_tool.args.items(): 32 | tool_arg = deepcopy(arg) 33 | tool_arg['name'] = name 34 | tool_arg['required'] = True 35 | if 'type' not in arg: 36 | tool_arg['type'] = arg['anyOf'][0].get('type', 'string') 37 | tool_arg.pop('title') 38 | self.parameters.append(tool_arg) 39 | 40 | def call(self, params: str, **kwargs): 41 | params = self._verify_args(params) 42 | res = self.langchain_tool.run(params) 43 | return res 44 | -------------------------------------------------------------------------------- /modelscope_agent/tools/mcp/__init__.py: -------------------------------------------------------------------------------- 1 | from .mcp_client import MCPClient 2 | from .mcp_manager import MCPManager 3 | -------------------------------------------------------------------------------- /modelscope_agent/tools/mcp/servers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/modelscope_agent/tools/mcp/servers/__init__.py -------------------------------------------------------------------------------- /modelscope_agent/tools/mcp/servers/crawl4ai/README.md: 
-------------------------------------------------------------------------------- 1 | # How-to-use 2 | 3 | ## Installation 4 | 5 | ```shell 6 | pip install -r requirements.txt 7 | crawl4ai-setup 8 | crawl4ai-doctor 9 | ``` 10 | 11 | ## Dev 12 | 13 | ```shell 14 | fastmcp dev server.py 15 | ``` 16 | 17 | ## Run 18 | 19 | Copy the contents of config.json and change the paths to your actual local file paths. 20 | -------------------------------------------------------------------------------- /modelscope_agent/tools/mcp/servers/crawl4ai/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/modelscope_agent/tools/mcp/servers/crawl4ai/__init__.py -------------------------------------------------------------------------------- /modelscope_agent/tools/mcp/servers/crawl4ai/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "crawl4ai": { 3 | "command": "/path/to/fastmcp", 4 | "args": [ 5 | "run", 6 | "/path/to/crawl4ai/server.py" 7 | ] 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /modelscope_agent/tools/mcp/servers/crawl4ai/requirements.txt: -------------------------------------------------------------------------------- 1 | crawl4ai 2 | fastmcp 3 | trafilatura 4 | -------------------------------------------------------------------------------- /modelscope_agent/tools/mcp/servers/notebook/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/modelscope_agent/tools/mcp/servers/notebook/__init__.py -------------------------------------------------------------------------------- /modelscope_agent/tools/mcp/servers/notebook/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "notebook": { 3 | "command": "/path/to/fastmcp", 4 | "args": [ 5 | "run", 6 | "/path/to/notebook/server.py" 7 | ] 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /modelscope_agent/tools/mcp/utils.py: -------------------------------------------------------------------------------- 1 | def fix_json_brackets(json_str): 2 | # Initialize the stack and the result buffer 3 | stack = [] 4 | result = [] 5 | 6 | # Walk through every character in the string 7 | for char in json_str: 8 | if char in '{[': 9 | # Opening bracket: push it onto the stack and keep it 10 | stack.append(char) 11 | result.append(char) 12 | elif char in '}]': 13 | # Closing bracket 14 | if not stack: 15 | # An empty stack means there is no matching opening bracket; drop it 16 | continue 17 | 18 | # Check whether the brackets match 19 | if (char == '}' and stack[-1] == '{') or (char == ']' 20 | and stack[-1] == '['): 21 | # Matched: pop the stack and keep the character 22 | stack.pop() 23 | result.append(char) 24 | else: 25 | # Mismatched: drop this closing bracket 26 | continue 27 | else: 28 | # Any other character is kept as-is 29 | result.append(char) 30 | 31 | # Close any opening brackets left on the stack 32 | while stack: 33 | # Append the matching closing bracket for each unmatched opener 34 | open_bracket = stack.pop() 35 | result.append('}' if open_bracket == '{' else ']') 36 | 37 | return ''.join(result) 38 | -------------------------------------------------------------------------------- /modelscope_agent/tools/metagpt_tools/tool_data_type.py: -------------------------------------------------------------------------------- 1 | # this code is originally from https://github.com/geekan/MetaGPT 2 | from pydantic import BaseModel 3 | 4 | 5 | class ToolSchema(BaseModel): 6 | description: str 7 | 
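# (editor's illustration, values hypothetical) a populated record of the Tool
# model defined just below might look like:
#   Tool(name='fillna', path='tools/libs/data_preprocess.py',
#        schemas={'description': 'fill missing values in a DataFrame'},
#        code='def fillna(df): ...', tags=['data_preprocess'])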
8 | 9 | class Tool(BaseModel): 10 | name: str 11 | path: str 12 | schemas: dict = {} 13 | code: str = '' 14 | tags: list[str] = [] 15 | -------------------------------------------------------------------------------- /modelscope_agent/tools/modelscope_tools/__init__.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | from modelscope_agent.utils import _LazyModule 4 | 5 | _import_structure = { 6 | 'image_chat_tool': ['ImageChatTool'], 7 | 'text_address_tool': ['TextAddressTool'], 8 | 'text_ie_tool': ['TextInfoExtractTool'], 9 | 'text_ner_tool': ['TextNerTool'], 10 | 'text_to_speech_tool': ['TexttoSpeechTool'], 11 | 'text_to_video_tool': ['TextToVideoTool'], 12 | 'translation_en2zh_tool': ['TranslationEn2ZhTool'], 13 | 'translation_zh2en_tool': ['TranslationZh2EnTool'], 14 | } 15 | 16 | sys.modules[__name__] = _LazyModule( 17 | __name__, 18 | globals()['__file__'], 19 | _import_structure, 20 | module_spec=__spec__, 21 | ) 22 | -------------------------------------------------------------------------------- /modelscope_agent/tools/modelscope_tools/text_address_tool.py: -------------------------------------------------------------------------------- 1 | import json 2 | from modelscope_agent.tools.base import register_tool 3 | 4 | from modelscope.utils.constant import Tasks 5 | from .pipeline_tool import ModelscopePipelineTool 6 | 7 | 8 | @register_tool('text-address') 9 | class TextAddressTool(ModelscopePipelineTool): 10 | default_model = 'damo/mgeo_geographic_elements_tagging_chinese_base' 11 | description = '地址解析服务,针对中文地址信息,识别出里面的元素,包括省、市、区、镇、社区、道路、路号、POI、楼栋号、户室号等' 12 | name = 'text-address' 13 | parameters: list = [{ 14 | 'name': 'input', 15 | 'description': '用户输入的地址信息', 16 | 'required': True, 17 | 'type': 'string' 18 | }] 19 | task = Tasks.token_classification 20 | url = 'https://api-inference.modelscope.cn/api-inference/v1/models/damo/mgeo_geographic_elements_tagging_chinese_base' # noqa E501 21 | 22 | def call(self, params: str, **kwargs) -> str: 23 | result = super().call(params, **kwargs) 24 | address = {} 25 | for e in result['Data']['output']: 26 | address[e['type']] = e['span'] 27 | return json.dumps(address, ensure_ascii=False) 28 | -------------------------------------------------------------------------------- /modelscope_agent/tools/modelscope_tools/text_ie_tool.py: -------------------------------------------------------------------------------- 1 | from collections import defaultdict 2 | from typing import Union 3 | 4 | from modelscope_agent.tools.base import register_tool 5 | 6 | from modelscope.utils.constant import Tasks 7 | from .pipeline_tool import ModelscopePipelineTool 8 | 9 | 10 | @register_tool('text-ie') 11 | class TextInfoExtractTool(ModelscopePipelineTool): 12 | default_model = 'damo/nlp_structbert_siamese-uie_chinese-base' 13 | description = '信息抽取服务,针对中文的文本,根据schema要抽取的内容,找出其中对应信息,并用json格式展示' 14 | name = 'text-ie' 15 | parameters: list = [{ 16 | 'name': 'input', 17 | 'description': '用户输入的文本', 18 | 'required': True, 19 | 'type': 'string' 20 | }, { 21 | 'name': 'schema', 22 | 'description': '要抽取信息的json表示', 23 | 'required': True, 24 | 'type': 'dict' 25 | }] 26 | task = Tasks.siamese_uie 27 | url = 'https://api-inference.modelscope.cn/api-inference/v1/models/damo/nlp_structbert_siamese-uie_chinese-base' 28 | 29 | def call(self, params: str, **kwargs) -> str: 30 | result = super().call(params, **kwargs) 31 | InfoExtract = defaultdict(list) 32 | for e in result['Data']['output']: 33 | 
InfoExtract[e[0]['type']].append(e[0]['span']) 34 | return str(dict(InfoExtract)) 35 | 36 | def _verify_args(self, params: str) -> Union[str, dict]: 37 | params = super()._verify_args(params) 38 | params['parameters'] = {'schema': params['schema']} 39 | params.pop('schema') 40 | return params 41 | -------------------------------------------------------------------------------- /modelscope_agent/tools/modelscope_tools/text_ner_tool.py: -------------------------------------------------------------------------------- 1 | from collections import defaultdict 2 | 3 | from modelscope_agent.tools.base import register_tool 4 | 5 | from modelscope.utils.constant import Tasks 6 | from .pipeline_tool import ModelscopePipelineTool 7 | 8 | 9 | @register_tool('text-ner') 10 | class TextNerTool(ModelscopePipelineTool): 11 | default_model = 'damo/nlp_raner_named-entity-recognition_chinese-base-cmeee' 12 | description = '命名实体识别服务,针对需要识别的中文文本,找出其中的实体,返回json格式结果' 13 | name = 'text-ner' 14 | parameters: list = [{ 15 | 'name': 'input', 16 | 'description': '用户输入的文本', 17 | 'required': True, 18 | 'type': 'string' 19 | }] 20 | task = Tasks.named_entity_recognition 21 | url = 'https://api-inference.modelscope.cn/api-inference/v1/models/damo/nlp_raner_named-entity-recognition_chinese-base-cmeee' # noqa E501 22 | 23 | def _remote_call(self, params: str, **kwargs) -> str: 24 | result = super()._remote_call(params, **kwargs) 25 | ner = defaultdict(list) 26 | for e in result['Data']['output']: 27 | ner[e['type']].append(e['span']) 28 | return str(dict(ner)) 29 | -------------------------------------------------------------------------------- /modelscope_agent/tools/modelscope_tools/text_to_speech_tool.py: -------------------------------------------------------------------------------- 1 | import json5 2 | from modelscope_agent.tools.base import register_tool 3 | from modelscope_agent.tools.utils.output_wrapper import AudioWrapper 4 | 5 | from modelscope.utils.constant import Tasks 6 | from .pipeline_tool import ModelscopePipelineTool 7 | 8 | 9 | @register_tool('speech-generation') 10 | class TexttoSpeechTool(ModelscopePipelineTool): 11 | default_model = 'damo/speech_sambert-hifigan_tts_zh-cn_16k' 12 | description = '文本转语音服务,将文字转换为自然而逼真的语音,可配置男声/女声' 13 | name = 'speech-generation' 14 | parameters: list = [{ 15 | 'name': 'input', 16 | 'description': '要转成语音的文本', 17 | 'required': True, 18 | 'type': 'string' 19 | }, { 20 | 'name': 'voice', 21 | 'description': 22 | '允许的声音类型:zhitian_emo(女声),zhiyan_emo(少女声),zhizhe_emo(男声),zhibei_emo(男童声)。', 23 | 'required': True, 24 | 'type': 'string' 25 | }] 26 | task = Tasks.text_to_speech 27 | url = 'https://api-inference.modelscope.cn/api-inference/v1/models/damo/speech_sambert-hifigan_tts_zh-cn_16k' 28 | 29 | def call(self, params: str, **kwargs) -> str: 30 | result = super().call(params, **kwargs) 31 | if result['Code'] != 200: 32 | print('speech_generation error: ', result) 33 | return None 34 | audio = result['Data']['output_wav'] 35 | return str(AudioWrapper(audio, **kwargs)) 36 | 37 | def _verify_args(self, params: str): 38 | # override the args 39 | params_json = super()._verify_args(params) 40 | params_json['parameters'] = {'voice': params_json.pop('voice')} 41 | return params_json 42 | -------------------------------------------------------------------------------- /modelscope_agent/tools/modelscope_tools/text_to_video_tool.py: -------------------------------------------------------------------------------- 1 | import json 2 | from modelscope_agent.tools.base import register_tool 3 
| from modelscope_agent.tools.utils.output_wrapper import VideoWrapper 4 | 5 | from modelscope.utils.constant import Tasks 6 | from .pipeline_tool import ModelscopePipelineTool 7 | 8 | 9 | @register_tool('video-generation') 10 | class TextToVideoTool(ModelscopePipelineTool): 11 | default_model = 'damo/text-to-video-synthesis' 12 | description = '视频生成服务,针对英文文本输入,生成一段描述视频' 13 | 14 | name = 'video-generation' 15 | parameters: list = [{ 16 | 'name': 'input', 17 | 'description': '用户输入的文本信息,仅支持英文文本描述', 18 | 'required': True, 19 | 'type': 'string' 20 | }] 21 | task = Tasks.text_to_video_synthesis 22 | url = 'https://api-inference.modelscope.cn/api-inference/v1/models/damo/text-to-video-synthesis' 23 | 24 | def call(self, params: str, **kwargs) -> str: 25 | result = super().call(params, **kwargs) 26 | video = result['Data']['output_video'] 27 | return str(VideoWrapper(video, **kwargs)) 28 | 29 | def _remote_call(self, params: dict, **kwargs): 30 | text = params['input'] 31 | params['input'] = {'text': text} 32 | return super()._remote_call(params, **kwargs) 33 | -------------------------------------------------------------------------------- /modelscope_agent/tools/modelscope_tools/translation_en2zh_tool.py: -------------------------------------------------------------------------------- 1 | from modelscope_agent.tools.base import register_tool 2 | 3 | from modelscope.utils.constant import Tasks 4 | from .pipeline_tool import ModelscopePipelineTool 5 | 6 | 7 | @register_tool('text-translation-en2zh') 8 | class TranslationEn2ZhTool(ModelscopePipelineTool): 9 | default_model = 'damo/nlp_csanmt_translation_en2zh' 10 | description = '根据输入指令,将相应的英文文本翻译成中文回复' 11 | name = 'text-translation-en2zh' 12 | parameters: list = [{ 13 | 'name': 'input', 14 | 'description': '用户输入的英文文本', 15 | 'required': True, 16 | 'type': 'string' 17 | }] 18 | task = Tasks.translation 19 | url = 'https://api-inference.modelscope.cn/api-inference/v1/models/damo/nlp_csanmt_translation_en2zh' 20 | 21 | def call(self, params: str, **kwargs) -> str: 22 | result = super().call(params, **kwargs) 23 | zh = result['Data']['translation'] 24 | return zh 25 | -------------------------------------------------------------------------------- /modelscope_agent/tools/modelscope_tools/translation_zh2en_tool.py: -------------------------------------------------------------------------------- 1 | from modelscope_agent.tools.base import register_tool 2 | 3 | from modelscope.utils.constant import Tasks 4 | from .pipeline_tool import ModelscopePipelineTool 5 | 6 | 7 | @register_tool('text-translation-zh2en') 8 | class TranslationZh2EnTool(ModelscopePipelineTool): 9 | default_model = 'damo/nlp_csanmt_translation_zh2en' 10 | description = '根据输入指令,将相应的中文文本翻译成英文回复' 11 | name = 'text-translation-zh2en' 12 | parameters: list = [{ 13 | 'name': 'input', 14 | 'description': '用户输入的中文文本', 15 | 'required': True, 16 | 'type': 'string' 17 | }] 18 | task = Tasks.translation 19 | url = 'https://api-inference.modelscope.cn/api-inference/v1/models/damo/nlp_csanmt_translation_zh2en' 20 | 21 | def call(self, params: str, **kwargs) -> str: 22 | result = super().call(params, **kwargs) 23 | en = result['Data']['translation'] 24 | return en 25 | -------------------------------------------------------------------------------- /modelscope_agent/tools/rapidapi_tools/Finance/__init__.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | from ....utils import _LazyModule 4 | 5 | _import_structure = { 6 | 'current_exchage': 7 | 
['ListquotesForCurrentExchange', 'exchange_for_current_exchange'], 8 | } 9 | 10 | sys.modules[__name__] = _LazyModule( 11 | __name__, 12 | globals()['__file__'], 13 | _import_structure, 14 | module_spec=__spec__, 15 | ) 16 | -------------------------------------------------------------------------------- /modelscope_agent/tools/rapidapi_tools/Modelscope/__init__.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | from ....utils import _LazyModule 4 | 5 | _import_structure = { 6 | 'text_ie_tool': 7 | ['TextinfoextracttoolForAlphaUmi'], 8 | } 9 | 10 | sys.modules[__name__] = _LazyModule( 11 | __name__, 12 | globals()['__file__'], 13 | _import_structure, 14 | module_spec=__spec__, 15 | ) 16 | -------------------------------------------------------------------------------- /modelscope_agent/tools/rapidapi_tools/Modelscope/text_ie_tool.py: -------------------------------------------------------------------------------- 1 | from collections import defaultdict 2 | from typing import Union 3 | 4 | from modelscope_agent.tools import register_tool 5 | 6 | from modelscope.utils.constant import Tasks 7 | from .pipeline_tool import ModelscopepipelinetoolForAlphaUmi 8 | 9 | 10 | @register_tool('model_scope_text_ie') 11 | class TextinfoextracttoolForAlphaUmi(ModelscopepipelinetoolForAlphaUmi): 12 | default_model = 'damo/nlp_structbert_siamese-uie_chinese-base' 13 | description = 'Information extraction service for Chinese text, which extracts specific content according to a predefined schema, \ 14 | identifies the corresponding information, and displays it in JSON format.' 15 | 16 | name = 'model_scope_text_ie' 17 | parameters: list = [{ 18 | 'name': 'input', 19 | 'description': 'text input by user', 20 | 'required': True, 21 | 'type': 'string' 22 | }, { 23 | 'name': 'schema', 24 | 'description': 'a JSON schema for the extracted information', 25 | 'required': True, 26 | 'type': 'dict' 27 | }] 28 | task = Tasks.siamese_uie 29 | url = 'https://api-inference.modelscope.cn/api-inference/v1/models/damo/nlp_structbert_siamese-uie_chinese-base' 30 | 31 | def call(self, params: str, **kwargs) -> str: 32 | result = super().call(params, **kwargs) 33 | print(result) 34 | InfoExtract = defaultdict(list) 35 | for e in result['Data']['output']: 36 | InfoExtract[e[0]['type']].append(e[0]['span']) 37 | return str(dict(InfoExtract)) 38 | 39 | def _verify_args(self, params: str) -> Union[str, dict]: 40 | params = super()._verify_args(params) 41 | params['parameters'] = {'schema': params['schema']} 42 | params.pop('schema') 43 | return params 44 | -------------------------------------------------------------------------------- /modelscope_agent/tools/rapidapi_tools/Movies/__init__.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | from ....utils import _LazyModule 4 | 5 | _import_structure = { 6 | 'movie_tv_music_search_and_download': [ 7 | 'SearchTorrentsForMovieTvMusicSearchAndDownload', 8 | 'GetMonthlyTop100MusicTorrentsForMovieTvMusicSearchAndDownload', 9 | 'GetMonthlyTop100GamesTorrentsForMovieTvMusicSearchAndDownload', 10 | 'GetMonthlyTop100TvShowsTorrentsForMovieTvMusicSearchAndDownload', 11 | 'GetMonthlyTop100MoviesTorrentsTorrentsForMovieTvMusicSearchAndDownload' 12 | ], 13 | } 14 | 15 | sys.modules[__name__] = _LazyModule( 16 | __name__, 17 | globals()['__file__'], 18 | _import_structure, 19 | module_spec=__spec__, 20 | ) 21 | 
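[Editor's note] Each of these `__init__.py` stubs defers the heavy submodule import until an exported name is first touched. A rough sketch of the behaviour (an editor's assumption about how `_LazyModule` resolves attributes, not code from the repository):

```python
from modelscope_agent.tools.rapidapi_tools import Movies

# Nothing from movie_tv_music_search_and_download has been imported yet;
# the first attribute access triggers the real import and returns the class.
search_tool_cls = Movies.SearchTorrentsForMovieTvMusicSearchAndDownload
```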
-------------------------------------------------------------------------------- /modelscope_agent/tools/rapidapi_tools/Number/__init__.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | from ....utils import _LazyModule 4 | 5 | _import_structure = { 6 | 'numbers': [ 7 | 'GetDataFactForNumbers', 'GetMathFactForNumbers', 8 | 'GetYearFactForNumbers' 9 | ], 10 | } 11 | 12 | sys.modules[__name__] = _LazyModule( 13 | __name__, 14 | globals()['__file__'], 15 | _import_structure, 16 | module_spec=__spec__, 17 | ) 18 | -------------------------------------------------------------------------------- /modelscope_agent/tools/rapidapi_tools/Translate/__init__.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | from ....utils import _LazyModule 4 | 5 | _import_structure = { 6 | 'google_translate': [ 7 | 'DetectForGoogleTranslate', 'LanguagesForGoogleTranslate', 8 | 'TranslateForGoogleTranslate' 9 | ], 10 | } 11 | 12 | sys.modules[__name__] = _LazyModule( 13 | __name__, 14 | globals()['__file__'], 15 | _import_structure, 16 | module_spec=__spec__, 17 | ) 18 | -------------------------------------------------------------------------------- /modelscope_agent/tools/rapidapi_tools/__init__.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | from modelscope_agent.utils import _LazyModule 4 | 5 | _import_structure = { 6 | 'Finance': 7 | ['ListquotesForCurrentExchange', 'exchange_for_current_exchange'], 8 | 'Modelscope': ['TextinfoextracttoolForAlphaUmi'], 9 | 'Movies': [ 10 | 'GetMonthlyTop100GamesTorrentsForMovieTvMusicSearchAndDownload', 11 | 'GetMonthlyTop100MoviesTorrentsTorrentsForMovieTvMusicSearchAndDownload', 12 | 'GetMonthlyTop100MusicTorrentsForMovieTvMusicSearchAndDownload', 13 | 'GetMonthlyTop100TvShowsTorrentsForMovieTvMusicSearchAndDownload', 14 | 'SearchTorrentsForMovieTvMusicSearchAndDownload' 15 | ], 16 | 'Number': [ 17 | 'GetDataFactForNumbers', 'GetMathFactForNumbers', 18 | 'GetYearFactForNumbers' 19 | ], 20 | 'Translate': [ 21 | 'DetectForGoogleTranslate', 'LanguagesForGoogleTranslate', 22 | 'TranslateForGoogleTranslate' 23 | ] 24 | } 25 | 26 | sys.modules[__name__] = _LazyModule( 27 | __name__, 28 | globals()['__file__'], 29 | _import_structure, 30 | module_spec=__spec__, 31 | ) 32 | -------------------------------------------------------------------------------- /modelscope_agent/tools/rapidapi_tools/basetool_for_alpha_umi.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import Dict, List, Optional, Union 3 | 4 | import json 5 | import json5 6 | from modelscope_agent.tools.base import BaseTool 7 | from modelscope_agent.utils.utils import has_chinese_chars 8 | 9 | TOOL_REGISTRY = {} 10 | 11 | 12 | def register_tool(name): 13 | 14 | def decorator(cls): 15 | TOOL_REGISTRY[name] = cls 16 | return cls 17 | 18 | return decorator 19 | 20 | 21 | class BasetoolAlphaUmi(BaseTool): 22 | name: str 23 | description: str 24 | parameters: List[Dict] 25 | 26 | def __init__(self, cfg: Optional[Dict] = {}): 27 | """ 28 | :param schema: Format of tools, default to oai format, in case there is a need for other formats 29 | """ 30 | self.cfg = cfg.get(self.name, {}) 31 | 32 | self.schema = 'alpha_umi' 33 | self.function = self._build_function() 34 | self.function_plain_text = self._parser_function() 35 | 36 | def _build_function(self): 37 | """ 38 | 
39 |
40 |         """
41 |         input_doc = {}
42 |         for p in self.parameters:
43 |             input_doc[p['name']] = (
44 |                 p['type'] + ', '
45 |                 + ('required, ' if p['required'] else 'optional, ')
46 |                 + p['description'][:128])
47 |
48 |         function = {
49 |             'Name': self.name[-64:],
50 |             'function': self.description[-256:],
51 |             'input': input_doc
52 |         }
53 |
54 |         return function
55 |
56 |     def _parser_function(self):
57 |         """
58 |         Text description of the function
59 |
60 |         """
61 |
62 |         return json.dumps(self.function)
63 |
--------------------------------------------------------------------------------
/modelscope_agent/tools/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/modelscope_agent/tools/utils/__init__.py
--------------------------------------------------------------------------------
/modelscope_agent/tools/web_search/__init__.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | from modelscope_agent.utils import _LazyModule
4 |
5 | _import_structure = {
6 |     'web_search': ['WebSearch'],
7 | }
8 |
9 | sys.modules[__name__] = _LazyModule(
10 |     __name__,
11 |     globals()['__file__'],
12 |     _import_structure,
13 |     module_spec=__spec__,
14 | )
15 |
--------------------------------------------------------------------------------
/modelscope_agent/tools/web_search/search_util.py:
--------------------------------------------------------------------------------
1 | import os
2 | from enum import Enum
3 | from typing import Optional
4 |
5 | from modelscope_agent.constants import ApiNames
6 | from pydantic import BaseModel, model_validator
7 |
8 |
9 | class SearchResult(BaseModel):
10 |     title: str
11 |     link: Optional[str] = None
12 |     sniper: Optional[str] = None
13 |
14 |     @model_validator(mode='before')
15 |     def validate_values(values):
16 |         if values['link'] is None and values['sniper'] is None:
17 |             raise ValueError('Either link or sniper must be provided.')
18 |         return values
19 |
20 |
21 | class AuthenticationKey(Enum):
22 |     bing = ApiNames.bing_api_key.value
23 |     kuake = 'PLACE_HOLDER'
24 |
25 |     @classmethod
26 |     def to_dict(cls):
27 |         return {member.name: member.value for member in cls}
28 |
29 |
30 | def get_websearcher_cls(searcher: Optional[str] = None):
31 |
32 |     if searcher:
33 |         if AuthenticationKey.bing.name == searcher:
34 |             from .searcher.bing import BingWebSearcher
35 |             return BingWebSearcher
36 |         elif AuthenticationKey.kuake.name == searcher:
37 |             from .searcher.kuake import KuakeWebSearcher
38 |             return KuakeWebSearcher
39 |     return None
40 |
--------------------------------------------------------------------------------
/modelscope_agent/tools/web_search/searcher/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/modelscope_agent/tools/web_search/searcher/__init__.py
--------------------------------------------------------------------------------
/modelscope_agent/tools/web_search/searcher/base_searcher.py:
--------------------------------------------------------------------------------
1 | class WebSearcher:
2 |     timeout = 1000
3 |
4 |     def __call__(self, **kwargs):
5 |         raise NotImplementedError()
6 |
--------------------------------------------------------------------------------
/modelscope_agent/tools/web_search/searcher/kuake.py:
--------------------------------------------------------------------------------
1 | from .base_searcher import WebSearcher
2 |
3 |
4 | class KuakeWebSearcher(WebSearcher):
5 |
6 |     def __call__(self, query, **kwargs):
7 |         raise NotImplementedError()
8 |
--------------------------------------------------------------------------------
/modelscope_agent/utils/__init__.py:
--------------------------------------------------------------------------------
1 | from .import_utils import _LazyModule
2 |
--------------------------------------------------------------------------------
/modelscope_agent/utils/base64_utils.py:
--------------------------------------------------------------------------------
1 | import base64
2 | import os
3 |
4 |
5 | def encode_file_to_base64(file_path: str):
6 |     # Read the file content and encode it to base64
7 |     with open(file_path, 'rb') as file_to_encode:
8 |         encoded_content = base64.b64encode(file_to_encode.read())
9 |     return encoded_content.decode('utf-8')
10 |
11 |
12 | def decode_base64_to_file(base64_string: str, file_path: str):
13 |     # Decode the base64 string to get the binary content
14 |     file_content = base64.b64decode(base64_string)
15 |
16 |     # Create the directory if needed (file_path may have no directory part)
17 |     dir_name = os.path.dirname(file_path)
18 |     if dir_name:
19 |         os.makedirs(dir_name, exist_ok=True)
20 |
21 |     # Write the binary content to the file
22 |     with open(file_path, 'wb') as output_file:
23 |         output_file.write(file_content)
24 |
25 |
26 | def encode_files_to_base64(file_paths: list):
27 |     # Encode multiple files to base64 in a dict
28 |     encoded_files = {}
29 |     for file_path in file_paths:
30 |         encoded_content = encode_file_to_base64(file_path)
31 |         encoded_files[os.path.basename(file_path)] = encoded_content
32 |     return encoded_files
33 |
34 |
35 | def decode_base64_to_files(base64_files_dict: dict, output_dir: str):
36 |     # Decode multiple base64 strings to files from a dict
37 |     decoded_files = {}
38 |     for file_name in base64_files_dict:
39 |         file_path = os.path.join(output_dir, file_name)
40 |         decode_base64_to_file(base64_files_dict[file_name], file_path)
41 |         decoded_files[file_name] = file_path
42 |     return decoded_files
43 |
--------------------------------------------------------------------------------
/modelscope_agent/utils/git.py:
--------------------------------------------------------------------------------
1 | import subprocess
2 |
3 |
4 | def clone_git_repository(repo_url, branch_name, folder_name):
5 |     """
6 |     Clone a git repository into a specified folder.
7 |     Args:
8 |         repo_url: URL of the git repository to clone
9 |         branch_name: branch name
10 |         folder_name: destination folder for the clone
11 |
12 |     Returns:
13 |         None
14 |     """
15 |     try:
16 |         subprocess.run(
17 |             ['git', 'clone', '-b', branch_name, repo_url, folder_name],
18 |             check=True,
19 |             stdout=subprocess.PIPE,
20 |             stderr=subprocess.PIPE)
21 |         print(f"Repository cloned successfully into '{folder_name}'.")
22 |
23 |     except subprocess.CalledProcessError as e:
24 |         print(f'Error cloning repository: {e.stderr.decode()}')
25 |         raise RuntimeError(f'Repository cloning failed with error: {e}')
26 |
--------------------------------------------------------------------------------
/modelscope_agent/utils/nltk/averaged_perceptron_tagger.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/modelscope_agent/utils/nltk/averaged_perceptron_tagger.zip
--------------------------------------------------------------------------------
/modelscope_agent/utils/nltk/punkt.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/modelscope_agent/utils/nltk/punkt.zip
--------------------------------------------------------------------------------
/modelscope_agent/utils/nltk/stopwords.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/modelscope_agent/utils/nltk/stopwords.zip
--------------------------------------------------------------------------------
/modelscope_agent/utils/qwen_agent/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/modelscope_agent/utils/qwen_agent/__init__.py
--------------------------------------------------------------------------------
/modelscope_agent/utils/qwen_agent/fncall_prompts/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/modelscope_agent/utils/qwen_agent/fncall_prompts/__init__.py
--------------------------------------------------------------------------------
/modelscope_agent/utils/qwen_agent/settings.py:
--------------------------------------------------------------------------------
1 | import ast
2 | import os
3 | from typing import List, Literal
4 |
5 | # Settings for LLMs
6 | DEFAULT_MAX_INPUT_TOKENS: int = int(
7 |     os.getenv('QWEN_AGENT_DEFAULT_MAX_INPUT_TOKENS', 58000)
8 | )  # The LLM will truncate the input messages if they exceed this limit
9 |
10 | # Settings for agents
11 | MAX_LLM_CALL_PER_RUN: int = int(
12 |     os.getenv('QWEN_AGENT_MAX_LLM_CALL_PER_RUN', 20))
13 |
14 | # Settings for tools
15 | DEFAULT_WORKSPACE: str = os.getenv('QWEN_AGENT_DEFAULT_WORKSPACE', 'workspace')
16 |
17 | # Settings for RAG
18 | DEFAULT_MAX_REF_TOKEN: int = int(
19 |     os.getenv('QWEN_AGENT_DEFAULT_MAX_REF_TOKEN',
20 |               20000))  # The window size reserved for RAG materials
21 | DEFAULT_PARSER_PAGE_SIZE: int = int(
22 |     os.getenv('QWEN_AGENT_DEFAULT_PARSER_PAGE_SIZE',
23 |               500))  # Max tokens per chunk when doing RAG
24 | DEFAULT_RAG_KEYGEN_STRATEGY: Literal[
25 |     'None', 'GenKeyword', 'SplitQueryThenGenKeyword',
26 |     'GenKeywordWithKnowledge',
27 |     'SplitQueryThenGenKeywordWithKnowledge'] = os.getenv(
28 |         'QWEN_AGENT_DEFAULT_RAG_KEYGEN_STRATEGY', 'GenKeyword')
29 | DEFAULT_RAG_SEARCHERS: List[str] = ast.literal_eval(
30 |     os.getenv('QWEN_AGENT_DEFAULT_RAG_SEARCHERS',
31 |               "['keyword_search', 'front_page_search']")
32 | )  # Sub-searchers for hybrid retrieval
33 |
--------------------------------------------------------------------------------
/modelscope_agent/utils/retry.py:
--------------------------------------------------------------------------------
1 | import time
2 | from functools import wraps
3 | from traceback import format_exc
4 |
5 | from modelscope_agent.utils.logger import agent_logger as logger
6 |
7 |
8 | def retry(max_retries=3, delay_seconds=1, return_str=False):
9 |     """
10 |     Retry decorator with a fixed delay between attempts.
11 |     Args:
12 |         max_retries: maximum number of attempts
13 |         delay_seconds: delay in seconds between retries
14 |         return_str: if True, return an error message string instead of raising after the last attempt
15 |
16 |     Returns: the decorated function
17 |
18 |     """
19 |
20 |     def decorator(func):
21 |
22 |         @wraps(func)
23 |         def wrapper(*args, **kwargs):
24 |             attempts = 0
25 |             while attempts < max_retries:
26 |                 try:
27 |                     return func(*args, **kwargs)
28 |                 except AssertionError as e:
29 |                     raise AssertionError(e)
30 |                 except Exception:
31 |                     logger.warning(
32 |                         f'Attempt {attempts + 1} to run {func.__name__} failed: {format_exc()}'
33 |                     )
34 |                     attempts += 1
35 |                     time.sleep(delay_seconds)
36 |             if return_str:
37 |                 return f'Max retries reached. Attempt to run {func.__name__} failed after {max_retries} attempts'
38 |             else:
39 |                 raise Exception('Max retries reached. Failed to get result')
40 |
41 |         return wrapper
42 |
43 |     return decorator
44 |
--------------------------------------------------------------------------------
/modelscope_agent/version.py:
--------------------------------------------------------------------------------
1 | __version__ = '0.8.0'
2 |
--------------------------------------------------------------------------------
/modelscope_agent_servers/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/modelscope_agent_servers/__init__.py
--------------------------------------------------------------------------------
/modelscope_agent_servers/assistant_server/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/modelscope_agent_servers/assistant_server/__init__.py
--------------------------------------------------------------------------------
/modelscope_agent_servers/requirements.txt:
--------------------------------------------------------------------------------
1 | docker
2 | fastapi
3 | modelscope-agent>=0.6.2
4 | pydantic
5 | sqlmodel
6 | uvicorn
7 |
--------------------------------------------------------------------------------
/modelscope_agent_servers/service_utils.py:
--------------------------------------------------------------------------------
1 | from typing import Dict, Union
2 |
3 | import json
4 | from fastapi.responses import JSONResponse
5 | from requests import Response
6 |
7 |
8 | def create_success_msg(output: Union[Dict, str],
9 |                        request_id: str,
10 |                        message: str = '',
11 |                        **kwargs):
12 |     content = {'request_id': request_id, 'message': message, 'output': output}
13 |     content.update(kwargs)
14 |     return JSONResponse(content=content)
15 |
16 |
17 | def create_error_msg(message: str, request_id: str, status_code: int = 400):
18 |     return JSONResponse(
19 |         content={
20 |             'request_id': request_id,
21 |             'message': message
22 |         },
23 |         status_code=status_code)
24 |
25 |
26 | def parse_service_response(response: Response):
27 |     try:
28 |         # Assuming the response is a JSON string
29 |         response_data = response.json()
30 |
31 |         # Extract the 'output' field from the response
32 |         output_data = response_data.get('output', {})
33 |         return output_data
34 |     except json.JSONDecodeError:
35 |         # Handle the case where response is not JSON or cannot be decoded
36 |         return None
37 |
--------------------------------------------------------------------------------
/modelscope_agent_servers/tool_manager_server/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/modelscope_agent_servers/tool_manager_server/__init__.py
--------------------------------------------------------------------------------
/modelscope_agent_servers/tool_manager_server/connections.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import docker
4 | from sqlmodel import Session, SQLModel, create_engine
5 |
6 | # database setting
7 | DATABASE_DIR = os.getenv('DATABASE_DIR', './test.db')
8 | DATABASE_URL = 'sqlite:///' + DATABASE_DIR  # os.path.join would drop the scheme for absolute paths
9 |
10 | engine = create_engine(DATABASE_URL, echo=True)
11 |
12 |
13 | def create_db_and_tables():
14 |     SQLModel.metadata.create_all(engine)
15 |
16 |
17 | def drop_db_and_tables():
18 |     SQLModel.metadata.drop_all(engine)
19 |
20 |
21 | def get_docker_client():
22 |     # Initialize docker client. Throws an exception if Docker is not reachable.
23 |     try:
24 |         docker_client = docker.from_env()
25 |     except docker.errors.DockerException as e:
26 |         print('Please check Docker is running using `docker ps`.')
27 |         print(f'Error! {e}', flush=True)
28 |         raise e
29 |     return docker_client
30 |
31 |
32 | def get_session():
33 |     with Session(engine) as session:
34 |         yield session
35 |
--------------------------------------------------------------------------------
/modelscope_agent_servers/tool_manager_server/models.py:
--------------------------------------------------------------------------------
1 | import os
2 | from enum import Enum
3 | from typing import Dict, Optional, Union
4 |
5 | from pydantic import BaseModel
6 | from sqlmodel import Field, SQLModel
7 |
8 |
9 | class ToolInstance(SQLModel, table=True):
10 |     id: Optional[int] = Field(default=None, primary_key=True)
11 |     name: str
12 |     status: Optional[str]  # one of "pending", "running", "exited", "failed"
13 |     image: Optional[str] = None
14 |     tenant_id: Optional[str] = None
15 |     container_id: Optional[str] = None
16 |     ip: Optional[str] = None
17 |     port: Optional[int] = 31513
18 |     error: Optional[str] = None
19 |
20 |
21 | class ToolRegisterInfo(BaseModel):
22 |     node_name: str
23 |     image: str = ''
24 |     workspace_dir: str = os.getcwd()
25 |     tool_name: str
26 |     tenant_id: str
27 |     config: Dict = {}
28 |     port: Optional[int] = 31513
29 |     tool_url: str = ''
30 |
31 |
32 | class CreateTool(BaseModel):
33 |     tool_name: str
34 |     tenant_id: str = 'default'
35 |     tool_cfg: Dict = {}
36 |     tool_image: str = 'modelscope-agent/tool-node:latest'
37 |     tool_url: str = ''
38 |
39 |
40 | class ExecuteTool(BaseModel):
41 |     tool_name: str
42 |     tenant_id: str = 'default'
43 |     params: str = ''
44 |     kwargs: Dict = {}
45 |
46 |
47 | class ExecuteOpenAPISchema(BaseModel):
48 |     openapi_name: str = ''
49 |     url: str = ''
50 |     params: Union[str, Dict] = ''
51 |     headers: Dict = {}
52 |     method: str = 'GET'
53 |     data: Dict = {}
54 |     cookies: Dict = {}
55 |
56 |
57 | class ContainerStatus(Enum):
58 |     pending = 'pending'
59 |     running = 'running'
60 |     exited = 'exited'
61 |     failed = 'failed'
62 |
--------------------------------------------------------------------------------
/modelscope_agent_servers/tool_manager_server/utils.py:
--------------------------------------------------------------------------------
1 | NODE_PORT_START = 31513
2 | NODE_PORT_END = 65535
3 |
4 |
5 | class SingletonMeta(type):
6 |     _instances = {}
7 |
8 |     def __call__(cls, *args, **kwargs):
9 |         if cls not in cls._instances:
10 |             cls._instances[cls] = super(SingletonMeta,
11 |                                         cls).__call__(*args, **kwargs)
12 |         return cls._instances[cls]
13 |
14 |
15 | class PortGenerator(metaclass=SingletonMeta):
16 |
17 |     def __init__(self, start=NODE_PORT_START, end=NODE_PORT_END):
18 |         self.start = start
19 |         self.end = end
20 |         self.allocated = set()
21 |
22 |     def __iter__(self):
23 |         return self
24 |
25 |     def __next__(self):
26 |         for port in range(self.start, self.end + 1):
27 |             if port not in self.allocated:
28 |                 self.allocated.add(port)
29 |                 return port
30 |         raise StopIteration  # no port is available in [start, end]
31 |
32 |     def release(self, port):
33 |         self.allocated.discard(
34 |             port)  # discard does not raise if the port was never allocated
35 |
--------------------------------------------------------------------------------
/modelscope_agent_servers/tool_node_server/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/modelscope_agent_servers/tool_node_server/__init__.py
--------------------------------------------------------------------------------
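A short usage sketch of the singleton `PortGenerator` defined in `tool_manager_server/utils.py` above (illustrative only; the behavior follows directly from the code shown, but this snippet is not part of the repo):

    from modelscope_agent_servers.tool_manager_server.utils import PortGenerator

    gen = PortGenerator()          # SingletonMeta: every call returns the same instance
    port = next(gen)               # lowest unallocated port, starting at 31513
    assert next(gen) == port + 1   # allocated ports are skipped
    gen.release(port)              # released ports become allocatable again
    assert next(gen) == port       # __next__ rescans from self.start each time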
/modelscope_agent_servers/tool_node_server/assets/configuration.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "image_gen", 3 | "image_gen" : { 4 | } 5 | } 6 | -------------------------------------------------------------------------------- /modelscope_agent_servers/tool_node_server/models.py: -------------------------------------------------------------------------------- 1 | from pydantic import BaseModel 2 | 3 | 4 | class ToolRequest(BaseModel): 5 | params: str 6 | kwargs: dict = {} 7 | messages: list = [] 8 | request_id: str 9 | 10 | 11 | class ToolResponse(BaseModel): 12 | result: str 13 | messages: list = [] 14 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | dashscope>=1.20.1 2 | faiss-cpu 3 | grpcio 4 | jieba 5 | json5 6 | jsonref 7 | jupyter>=1.0.0 8 | langchain 9 | langchain-community 10 | langchain-experimental 11 | llama-index==0.10.29 12 | llama-index-core==0.10.39.post1 13 | llama-index-readers-json 14 | llama-index-retrievers-bm25==0.1.5 15 | modelscope[framework]>=1.16.0 16 | openai 17 | opencv-python 18 | openpyxl 19 | pdfminer.six 20 | Pillow 21 | pydantic>=2.3.0 22 | pytest 23 | pytest-mock 24 | python-dotenv 25 | requests>=2.32.3 26 | seaborn 27 | sentencepiece 28 | tiktoken 29 | unstructured 30 | -------------------------------------------------------------------------------- /requirements/docs.txt: -------------------------------------------------------------------------------- 1 | docutils>=0.16.0 2 | myst_parser 3 | recommonmark 4 | sphinx>=5.3.0 5 | sphinx-book-theme 6 | sphinx-copybutton 7 | sphinx-rtd-theme 8 | sphinx_markdown_tables 9 | -------------------------------------------------------------------------------- /resources/MSAgent-Bench.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/resources/MSAgent-Bench.png -------------------------------------------------------------------------------- /resources/data_science_assistant_streamlit_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/resources/data_science_assistant_streamlit_1.png -------------------------------------------------------------------------------- /resources/data_science_assistant_streamlit_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/resources/data_science_assistant_streamlit_2.png -------------------------------------------------------------------------------- /resources/data_science_assistant_streamlit_3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/resources/data_science_assistant_streamlit_3.png -------------------------------------------------------------------------------- /resources/data_science_assistant_streamlit_4.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/resources/data_science_assistant_streamlit_4.png
--------------------------------------------------------------------------------
/resources/modelscope-agent.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/resources/modelscope-agent.png
--------------------------------------------------------------------------------
/resources/modelscopegpt_case_knowledge-qa.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/resources/modelscopegpt_case_knowledge-qa.png
--------------------------------------------------------------------------------
/resources/modelscopegpt_case_multi-modal.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/resources/modelscopegpt_case_multi-modal.png
--------------------------------------------------------------------------------
/resources/modelscopegpt_case_multi-turn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/resources/modelscopegpt_case_multi-turn.png
--------------------------------------------------------------------------------
/resources/modelscopegpt_case_single-step.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/resources/modelscopegpt_case_single-step.png
--------------------------------------------------------------------------------
/resources/modelscopegpt_case_video-generation.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/resources/modelscopegpt_case_video-generation.png
--------------------------------------------------------------------------------
/scripts/run_tool_manager.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # check if `docker` cmd exists
4 | if ! command -v docker &> /dev/null
5 | then
6 |     echo "Docker could not be found"
7 |     exit 1
8 | else
9 |     echo "Docker is installed"
10 |     # check if docker daemon is running
11 |     if ! docker info &> /dev/null; then
12 |         echo "Docker daemon is not running"
13 |         exit 1
14 |     else
15 |         echo "Docker daemon is running"
16 |     fi
17 | fi
18 |
19 |
20 | # use venv
21 | if [ ! -d "venv" ]; then
22 |     echo "Creating virtual environment..."
23 |     python3 -m venv venv
24 | else
25 |     echo "Virtual environment already exists."
26 | fi
27 |
28 | # build tool node image
29 | function build_docker_image {
30 |     echo "Building tool node image; the first build may take about 10 minutes."
31 |     docker build -f docker/tool_node.dockerfile -t modelscope-agent/tool-node .
32 | }
33 |
34 | # install the few required dependencies (ideally inside the venv created above)
35 | echo "Installing dependencies from requirements.txt..."
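# NOTE: the venv created above is never activated by this script, so the
# pip3 install below targets the current Python environment. If the venv is
# actually meant to be used (an assumption, not part of the original script),
# a minimal fix would be to activate it first:
#   source venv/bin/activate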
36 | pip3 install -r modelscope_agent_servers/requirements.txt
37 |
38 | # Check if the first argument is "build", if so, build the Docker image
39 | if [ "$1" == "build" ]; then
40 |     build_docker_image
41 | else
42 |     echo "Skipping Docker build; pass 'build' as the first argument to build the image."
43 | fi
44 |
45 | # running
46 | echo "Running FastAPI tool manager server at port 31511."
47 | export PYTHONPATH=$PYTHONPATH:modelscope_agent_servers
48 | uvicorn modelscope_agent_servers.tool_manager_server.api:app --host 0.0.0.0 --port 31511
49 |
--------------------------------------------------------------------------------
/scripts/run_tool_node.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # get OSS_URL from ENV
4 | OSS_URL=${TOOL_OSS_URL}
5 | ZIP_FILE_NAME="new_tool.zip"
6 | DESTINATION_FOLDER="/app/modelscope_agent/tools/contrib/new_tool"
7 |
8 | mkdir -p /app/assets
9 | echo "{\"name\": \"${TOOL_NAME}\"}" > /app/assets/configuration.json
10 |
11 | # check if OSS_URL is empty; if empty, then run a normal tool node server.
12 | if [ -z "${OSS_URL}" ]; then
13 |     uvicorn modelscope_agent_servers.tool_node_server.api:app --host 0.0.0.0 --port "$1"
14 |     exit 0
15 | fi
16 |
17 | # Make sure the destination folder exists
18 | mkdir -p "${DESTINATION_FOLDER}"
19 |
20 | # download the zip file
21 | wget -O "${ZIP_FILE_NAME}" "${OSS_URL}"
22 |
23 | # check if download is successful
24 | if [ $? -ne 0 ]; then
25 |     echo "Download failed."
26 |     exit 1
27 | else
28 |     echo "Downloaded ${ZIP_FILE_NAME} successfully."
29 |
30 |     # unzip the downloaded file and remember its exit status
31 |     unzip -o "${ZIP_FILE_NAME}" -d "${DESTINATION_FOLDER}"
32 |     unzip_status=$?
33 |     for subfolder in "${DESTINATION_FOLDER}"/*; do
34 |         if [ -d "$subfolder" ]; then # Check if it's a directory
35 |             find "$subfolder" -type f -exec mv {} "${DESTINATION_FOLDER}"/ \;
36 |             # Optionally, remove the now-empty subdirectory
37 |             rmdir "$subfolder"
38 |         fi
39 |     done
40 |     echo "from .new_tool import *" >> /app/modelscope_agent/tools/contrib/__init__.py
41 |
42 |     # check if extraction was successful ($? is clobbered by the commands above)
43 |     if [ ${unzip_status} -ne 0 ]; then
44 |         echo "Extraction failed."
45 |         exit 1
46 |     else
47 |         echo "Extracted ${ZIP_FILE_NAME} into ${DESTINATION_FOLDER}."
48 |
49 |         # clean up the downloaded zip file
50 |         rm "${ZIP_FILE_NAME}"
51 |         echo "Removed the downloaded zip file."
52 |     fi
53 | fi
54 |
55 | uvicorn modelscope_agent_servers.tool_node_server.api:app --host 0.0.0.0 --port "$1"
56 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [isort]
2 | line_length = 79
3 | multi_line_output = 0
4 | known_standard_library = setuptools
5 | known_first_party = modelscope
6 | known_third_party = json,yaml
7 | no_lines_before = STDLIB,LOCALFOLDER
8 | default_section = THIRDPARTY
9 |
10 | [yapf]
11 | BASED_ON_STYLE = pep8
12 | BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF = true
13 | SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN = true
14 | SPLIT_BEFORE_ARITHMETIC_OPERATOR = true
15 |
16 | [codespell]
17 | skip = *.ipynb
18 | quiet-level = 3
19 | ignore-words-list = patten,nd,ty,mot,hist,formating,winn,gool,datas,wan,confids
20 |
21 | [flake8]
22 | max-line-length = 120
23 | select = B,C,E,F,P,T4,W,B9
24 | ignore = F401,F405,F821,W503,E251
25 | exclude = docs/src,*.pyi,.git
26 |
27 | [darglint]
28 | ignore=DAR101
29 |
30 | [easy_install]
31 | index-url=https://pypi.tuna.tsinghua.edu.cn/simple
32 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import find_packages, setup
2 | from typing import List
3 |
4 |
5 | def parse_requirements(file_name: str) -> List[str]:
6 |     with open(file_name) as f:
7 |         return [
8 |             require.strip() for require in f
9 |             if require.strip() and not require.startswith('#')
10 |         ]
11 |
12 |
13 | def readme():
14 |     with open('README.md', encoding='utf-8') as f:
15 |         content = f.read()
16 |     return content
17 |
18 |
19 | version_file = 'modelscope_agent/version.py'
20 |
21 |
22 | def get_version():
23 |     with open(version_file, 'r', encoding='utf-8') as f:
24 |         exec(compile(f.read(), version_file, 'exec'))
25 |     return locals()['__version__']
26 |
27 |
28 | setup(
29 |     name='modelscope-agent',
30 |     version=get_version(),
31 |     description=
32 |     'ModelScope Agent: a powerful model and tool agent framework based on ModelScope and open-source LLMs.',
33 |     author='Modelscope Team',
34 |     author_email='contact@modelscope.cn',
35 |     keywords='python,agent,LLM,AIGC,qwen,ModelScope',
36 |     url='https://github.com/modelscope/modelscope-agent',
37 |     license='Apache License 2.0',
38 |     packages=find_packages(exclude=['*test*', 'demo']),
39 |     include_package_data=True,
40 |     install_requires=parse_requirements('requirements.txt'),
41 |     long_description=readme(),
42 |     long_description_content_type='text/markdown',
43 |     package_data={
44 |         'modelscope_agent.tools.mcp.servers.notebook': ['config.json'],
45 |     },
46 | )
47 |
--------------------------------------------------------------------------------
/test_single.py:
--------------------------------------------------------------------------------
1 | from modelscope_agent.agent import Agent  # NOQA
2 |
3 | import os
4 |
5 |
6 | def test():
7 |
8 |     # MCP server endpoints used by the agent
9 |     mcp_servers = {
10 |         'mcpServers': {
11 |             'time': {
12 |                 'type':
13 |                 'sse',
14 |                 'url':
15 |                 'https://agenttor-mod-dd-cbwtrtihpn.cn-zhangjiakou.fcapp.run/sse'
16 |             },
17 |             'fetch': {
18 |                 'type':
19 |                 'sse',
20 |                 'url':
21 |                 'https://mcp-cdb79f47-15a7-4a72.api-inference.modelscope.cn/sse'
22 |             }
23 |         }
24 |     }
25 |
26 |     default_system = (
27 |         'You are an assistant which helps me to finish a complex job. Tools may be given to you '
28 |         'and you must choose some of them one per round to finish my request.')
29 |
30 |     llm_config = {
31 |         'model': 'Qwen/Qwen2.5-72B-Instruct',
32 |         'model_server': 'openai',
33 |         'api_base': 'https://api-inference.modelscope.cn/v1/',
34 |         'api_key': os.getenv('MODELSCOPE_API_KEY')
35 |     }
36 |     # llm_config = {
37 |     #     'model': 'claude-3-7-sonnet-20250219',
38 |     #     'model_server': 'openai',
39 |     #     'api_base': 'https://dashscope.aliyuncs.com/compatible-mode/v1',
40 |     #     'api_key': os.getenv('DASHSCOPE_API_KEY_YH')
41 |     # }
42 |
43 |     bot = Agent(mcp=mcp_servers, llm=llm_config, instruction=default_system)
44 |
45 |     response = bot.run('上周日几号?那天北京天气情况如何')
46 |
47 |     text = ''
48 |     for chunk in response:
49 |         text += chunk
50 |     print(text)
51 |     assert isinstance(text, str)
52 |
53 |
54 | if __name__ == '__main__':
55 |     test()
56 |
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/tests/__init__.py
--------------------------------------------------------------------------------
/tests/agents/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/tests/agents/__init__.py
--------------------------------------------------------------------------------
/tests/agents/test_agent_builder.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import pytest
4 | from modelscope_agent.agents.agent_builder import AgentBuilder
5 |
6 | IS_FORKED_PR = os.getenv('IS_FORKED_PR', 'false') == 'true'
7 |
8 |
9 | @pytest.mark.skipif(IS_FORKED_PR, reason='only run modelscope-agent main repo')
10 | def test_agent_builder():
11 |     llm_config = {'model': 'qwen-turbo', 'model_server': 'dashscope'}
12 |
13 |     # names of the tools to enable
14 |     function_list = ['image_gen']
15 |
16 |     bot = AgentBuilder(function_list=function_list, llm=llm_config)
17 |
18 |     response = bot.run('创建一个多啦A梦')
19 |
20 |     text = ''
21 |     for chunk in response:
22 |         text += chunk
23 |     print(text)
24 |     assert isinstance(text, str)
25 |     assert 'Answer:' in text
26 |     assert 'Config:' in text
27 |     assert 'RichConfig:' in text
28 |
--------------------------------------------------------------------------------
/tests/agents/test_agent_with_api_tool.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import pytest
4 | import requests
5 | from modelscope_agent.agents.role_play import RolePlay
6 | from modelscope_agent.constants import DEFAULT_TOOL_MANAGER_SERVICE_URL
7 |
8 | IN_GITHUB_ACTIONS = os.getenv('GITHUB_ACTIONS') == 'true'
9 |
10 |
11 | def check_url(url: str):
12 |     try:
13 |         response = requests.get(url, timeout=5)  # request url
14 |         if response.status_code == 200:
15 |             print(
16 |                 f'{url} is accessible and returned a successful status code.')
17 |             return True
18 |     except requests.ConnectionError:
19 |         print(f'{url} is not accessible due to a connection error.')
20 |     except requests.Timeout:
21 |         print(f'Request to {url} timed out.')
22 |     except requests.RequestException as e:
23 |         print(f'An error occurred while trying to access {url}: {e}')
24 |
25 |     return False
26 |
27 |
28 | @pytest.mark.skipif(
29 |     IN_GITHUB_ACTIONS, reason='Need to set up the docker environment')
30 | def test_role_play_with():
31 |     llm_config = {'model': 'qwen-turbo', 'model_server': 'dashscope'}
32 |
33 |     # names of the tools to enable
34 |     function_list = ['image_gen']
35 |
36 |     is_accessible = check_url(DEFAULT_TOOL_MANAGER_SERVICE_URL)
37 |
38 |     if not is_accessible:
39 |         assert False, """Start up the tool manager service by `sh scripts/run_tool_manager.sh`"""
40 |
41 |     bot = RolePlay(
42 |         function_list=function_list, llm=llm_config, use_tool_api=True)
43 |
44 |     response = bot.run(
45 |         '创建一个多啦A梦', dashscope_api_key=os.getenv('DASHSCOPE_API_KEY'))
46 |
47 |     text = ''
48 |     for chunk in response:
49 |         text += chunk
50 |     print(text)
51 |     assert isinstance(text, str)
52 |     assert 'Answer:' in text
53 |     assert 'Observation:' in text
54 |     assert '![IMAGEGEN]' in text
55 |
--------------------------------------------------------------------------------
/tests/agents/test_memory_with_file_knowledge.py:
--------------------------------------------------------------------------------
1 | # not implemented yet
2 |
--------------------------------------------------------------------------------
/tests/agents/test_memory_with_retrieval_knowledge.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import pytest
4 | from modelscope_agent.memory.memory_with_retrieval_knowledge import \
5 |     MemoryWithRetrievalKnowledge
6 |
7 | current_file_dir = os.path.dirname(os.path.abspath(__file__))
8 | parent_dir = os.path.dirname(current_file_dir)
9 |
10 |
11 | @pytest.fixture
12 | def temporary_storage(tmpdir):
13 |     # Use a temporary directory for testing storage
14 |     return str(tmpdir.mkdir('knowledge_vector_test'))
15 |
16 |
17 | def test_memory_with_retrieval_knowledge(temporary_storage):
18 |     random_name = 'test_memory_agent'
19 |
20 |     memory = MemoryWithRetrievalKnowledge(
21 |         storage_path=temporary_storage,
22 |         name=random_name,
23 |         memory_path=temporary_storage,
24 |     )
25 |     test_file = os.path.join(parent_dir, 'samples')
26 |
27 |     # test adding the sample files to the memory
28 |     memory.run(query=None, url=test_file)
29 |     assert os.path.exists(
30 |         os.path.join(temporary_storage, random_name + '.faiss'))
31 |     assert os.path.exists(
32 |         os.path.join(temporary_storage, random_name + '.pkl'))
33 |
34 |     result = memory.run(query='介绍memory', max_token=1000)
35 |     assert isinstance(result, str)
36 |     assert 0 < len(result) < 1000
37 |
--------------------------------------------------------------------------------
/tests/llms/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/tests/llms/__init__.py
--------------------------------------------------------------------------------
/tests/llms/test_llm.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from modelscope_agent.llm import QwenChatAtDS
3 |
4 | prompt = 'Tell me a joke.'
5 | messages = [{
6 |     'role': 'user',
7 |     'content': 'Hello.'
8 | }, {
9 |     'role': 'assistant',
10 |     'content': 'Hi there!'
11 | }, {
12 |     'role': 'user',
13 |     'content': 'Tell me a joke.'
14 | }] 15 | 16 | 17 | @pytest.fixture 18 | def chat_model(mocker): 19 | # using mock llm result as output 20 | llm_config = { 21 | 'model': 'qwen-max', 22 | 'model_server': 'dashscope', 23 | 'api_key': 'test' 24 | } 25 | chat_model = QwenChatAtDS(**llm_config) 26 | mocker.patch.object( 27 | chat_model, '_chat_stream', return_value=['hello', ' there']) 28 | mocker.patch.object( 29 | chat_model, '_chat_no_stream', return_value='hello there') 30 | return chat_model 31 | 32 | 33 | def test_chat_no_stream_with_prompt(chat_model): 34 | response = chat_model.chat(prompt=prompt) 35 | assert isinstance(response, str) 36 | assert response.strip() == 'hello there' 37 | 38 | 39 | def test_chat_no_stream_with_messages(chat_model): 40 | response = chat_model.chat(messages=messages) 41 | assert isinstance(response, str) 42 | assert response.strip() == 'hello there' 43 | 44 | 45 | def test_chat_stream_with_prompt(chat_model): 46 | responses = list(chat_model.chat(prompt=prompt, stream=True)) 47 | assert isinstance(responses, list) 48 | assert all(isinstance(resp, str) for resp in responses) 49 | assert all(resp.strip() for resp in responses) 50 | assert responses == ['hello', ' there'] 51 | 52 | 53 | def test_chat_no_stream_with_invalid_messages(chat_model): 54 | with pytest.raises( 55 | AssertionError, match='messages list must not be empty'): 56 | chat_model.chat(messages=[]) 57 | -------------------------------------------------------------------------------- /tests/llms/test_vllm.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from modelscope_agent.llm import OpenAi 3 | 4 | prompt = 'Tell me a joke.' 5 | messages = [{ 6 | 'role': 'user', 7 | 'content': 'Hello.' 8 | }, { 9 | 'role': 'assistant', 10 | 'content': 'Hi there!' 11 | }, { 12 | 'role': 'user', 13 | 'content': 'Tell me a joke.' 
14 | }] 15 | 16 | 17 | @pytest.fixture 18 | def chat_model(mocker): 19 | # using mock llm result as output 20 | llm_config = { 21 | 'model': 'qwen', 22 | 'model_server': 'openai', 23 | 'api_base': 'http://127.0.0.1:8000/v1', 24 | 'api_key': 'EMPTY' 25 | } 26 | chat_model = OpenAi(**llm_config) 27 | mocker.patch.object( 28 | chat_model, '_chat_stream', return_value=['hello', ' there']) 29 | mocker.patch.object( 30 | chat_model, '_chat_no_stream', return_value='hello there') 31 | return chat_model 32 | 33 | 34 | def test_chat_stop_word(chat_model): 35 | stop = chat_model._update_stop_word(['observation']) 36 | assert isinstance(stop, list) 37 | assert stop == ['<|im_end|>', 'observation'] 38 | stop = chat_model._update_stop_word(None) 39 | assert isinstance(stop, list) 40 | assert stop == ['<|im_end|>'] 41 | stop = chat_model._update_stop_word([]) 42 | assert isinstance(stop, list) 43 | assert stop == ['<|im_end|>'] 44 | -------------------------------------------------------------------------------- /tests/samples/34aca18b-17a1-4558-9064-22fdfcef7a94.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/tests/samples/34aca18b-17a1-4558-9064-22fdfcef7a94.wav -------------------------------------------------------------------------------- /tests/samples/girl.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/tests/samples/girl.png -------------------------------------------------------------------------------- /tests/samples/luoli15.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/tests/samples/luoli15.jpg -------------------------------------------------------------------------------- /tests/samples/modelscope_qa_1.txt: -------------------------------------------------------------------------------- 1 | 常见问题 2 | 本文整理了 ModelScope 模型使用过程中遇到的常见问题,希望能够通过此文档为您解答使用过程中的疑惑。 3 | 4 | Q1:ModelScope 社区平台的模型支持商用吗? 5 | 6 | 开源的模型商用需要遵循开源协议,具体可参考下对应的模型的开源协议。 7 | 8 | Q2:pip install 的时候有些包下载特别慢怎么办? 9 | 10 | 在国内 pip 安装的时候,如果默认是用海外的 pypi 源的话,可能因为网络问题,下载速度受限。建议在国内可以通过"-i https://pypi.tuna.tsinghua.edu.cn/simple" 的命令行选项,来配置仓库来源使用"清华源"。例如: 11 | 12 | pip install "modelscope[nlp]" -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html -i https://pypi.tuna.tsinghua.edu.cn/simple 13 | Q3:用 git 拉取模型到本地,发现模型文件和远程仓库文件大小不一致,是什么问题? 14 | 15 | 因部分文件使用 lfs 进行存储,需要先安装 lfs 工具。请检查下是否没有安装 git lfs(pip list|grep lfs),如果没有安装,请使用 git lfs install 命令安装。具体可参见模型详情页的【快速使用】页面。 16 | 17 | Q4:我的系统目前是 Windows,使用某模型的时候报错,是什么问题? 18 | 19 | ModelScope 框架本身支持 Windows 环境的运行。但是平台上多种多样的模型,本身难免会有一些模型会部分依赖于系统环境,从而影响在 Windows 上的兼容性。 一方面您通过 ModelScope 网站上提供的 notebook 环境试用,另一方面,对于少数在 Windows 需要单独配置的模型,可以通过创建 windows linux subsystem 来模拟 Linux 环境,或者根据具体三方依赖包的官方网站说明,来在 Windows 环境下多独立安装。 20 | 21 | Q5:ModelScope 模型的使用依赖于互联网连接吗? 
22 | 23 | ModelScope 通过 Model Hub 和 Dataset Hub 来进行模型和数据集的管理和版本管理。因此要获取最好的用户体验,我们建议尽量在联网环境下使用。这能确保您使用的模型和数据集都能是最新的版本,获取最好的模型和数据集。另一方面,如果您使用 ModelScope 开源模型的环境没有网络连接,那也可以通过将模型下载到本地,再从本地直接加载的方式来使用。具体范例如下: 第一步:拉取模型数据到本地: 24 | 25 | from modelscope.hub.snapshot_download import snapshot_download 26 | path = snapshot_download('damo/cv_convnextTiny_ocr-recognition-general_damo') 27 | print(path) 28 | 第二步:然后把模型数据(即 path 文件夹的内容),拷贝到一个新的本地路径 new_path. 第三部:通过本地路径来加载模型,构建 pipeline。 29 | 30 | ocr_recognition = pipeline(Tasks.ocr_recognition, model=new_path) 31 | 注意:这里需要再次强调的是,使用这种方式如果社区有模型有更新的话,则无法直接检测到。 32 | 33 | Q6:环境 mac os x86,系统 Ventura 13 Beta 13,环境安装报错“missing xcrun at: /Library/Developer/CommandLineTools/usr/bin/xcrun” 34 | 35 | MBP 新环境碰到这个问题,您需要执行 xcode-select --install。 36 | 37 | Q7:基础大模型下载下来之后如何支持下游模型? 38 | 39 | 针对大模型您可以尝试 zeroshot,fine tune 后会有更好的表现。 40 | -------------------------------------------------------------------------------- /tests/samples/modelscope_qa_2.txt: -------------------------------------------------------------------------------- 1 | Q8:多卡环境,如何指定卡推理? 2 | 3 | 推理可以传递参数 device,pipeline 参数: device 设置 'gpu:0' 即可。 4 | 5 | Q9:zero-shot 分类模型可以用下游的自己的数据作微调吗? 6 | 7 | 可以。如果您的数据 label 变化较大,出于追求模型效果,classifier 可以 init weight 处理。如果您的数据 label 变化不大,可以直接在 classifier 上继续微调。 8 | 9 | Q10:在哪里可以看得到 ModelScope 教程和实战资料? 10 | 11 | 您可以查看 ModelScope 实战训练营,点击报名后即可查看所有录制的视频课程。 12 | 13 | Q11:ModelScope 有没有已经搭好的 docker 镜像,以及我应该在哪里下载使用? 14 | 15 | ModelScope 提供 GPU 镜像和 CPU 镜像,具体可在环境安装内查看最新版本镜像信息。 16 | 17 | Q12:ModelScope 是否支持算法评测? 18 | 19 | 目前 API 支持单个模型的 finetune 和评测,批量评测功能还在持续建设中,您暂时可以写个脚本来实现。关于算法评测,可以参考这里。 20 | 21 | Q13:ModelScope 是否会推出纯离线的 SDK 版本? 22 | 23 | 现在模型大部分还是需要基于服务端的算力支持,纯端上的模型的剪枝和转化可以需要用一些工具来解决,这部分工具能力还在规划开放中。 24 | 25 | Q14:通过 SDK 上传数据集或模型时,报错“requests.exceptions.HTTPError: 400 Client Error: Bad Request for url:”怎么办? 26 | 27 | 您可以先检查下当前的 Library 版本,确认下是否为最新。然后检查下采用的 token 是否为 SDK token。若还不能解决该问题,请联系官方协助您解决。 28 | 29 | Q15:使用官方镜像,但加载模型过程中会存在报错,应该怎么解决? 30 | 您可以先通过 pip list 等方式,对照环境安装内版本号看当前镜像是否为最新版本,若非最新版本,可更新后重试。若重试依然无法解决问题,请通过官方钉钉群联系我们。 31 | 32 | Q16: 模型大文件上传遇到问题如何解决? 33 | 模型文件一般都比较大,我们通过 git lfs 管理模型中的大文件,首先确保您安装了正确版本的 git-lfs, 另外请确保您的大文件在文件列表中(.gitattributes 文件). 
34 |
--------------------------------------------------------------------------------
/tests/samples/ms_intro.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/tests/samples/ms_intro.png
--------------------------------------------------------------------------------
/tests/samples/rag.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/tests/samples/rag.png
--------------------------------------------------------------------------------
/tests/samples/rag2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/tests/samples/rag2.png
--------------------------------------------------------------------------------
/tests/samples/rag3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/tests/samples/rag3.jpg
--------------------------------------------------------------------------------
/tests/samples/常见QA.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/tests/samples/常见QA.pdf
--------------------------------------------------------------------------------
/tests/storage/test_file_storage.py:
--------------------------------------------------------------------------------
1 | # not implemented yet
2 |
--------------------------------------------------------------------------------
/tests/tools/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/tests/tools/__init__.py
--------------------------------------------------------------------------------
/tests/tools/test_hf_tool.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from modelscope_agent.tools.hf_tool import HFTool
3 |
4 | # from transformers import load_tool
5 |
6 |
7 | @pytest.mark.skip()
8 | def test_is_hf_tool():
9 |     # the tool should only be initialized with a transformers tool instance
10 |     with pytest.raises(ValueError) as e:
11 |         HFTool('mock_tool')
12 |     exec_msg = e.value.args[0]
13 |     assert (exec_msg == 'HFTool should be type of HF tool')
14 |
15 |
16 | @pytest.mark.skip()
17 | def test_run_hf_tool():
18 |     # test running an HF tool
19 |     tool = load_tool('translation')
20 |     name = 'translator'
21 |     description = 'This is a tool that translates text from a language to another. '
22 |     parameters = [{
23 |         'name': 'text',
24 |         'type': 'string',
25 |         'description': 'the text to translate',
26 |         'required': True
27 |     }, {
28 |         'name': 'src_lang',
29 |         'type': 'string',
30 |         'description': 'the language of the text to translate',
31 |         'required': True
32 |     }, {
33 |         'name': 'tgt_lang',
34 |         'type': 'string',
35 |         'description': 'the desired output language',
36 |         'required': True
37 |     }]
38 |     hf_tool = HFTool(
39 |         tool, name=name, description=description, parameters=parameters)
40 |     tool_input = """{'text': 'Hello','src_lang':'English','tgt_lang':'French'}"""
41 |     res = hf_tool.call(tool_input)
42 |     print(res)
43 |     assert res == '"Je vous salue."'
44 |
--------------------------------------------------------------------------------
/tests/tools/test_image_enhancement.py:
--------------------------------------------------------------------------------
1 | from modelscope_agent.agents.role_play import RolePlay  # NOQA
2 | import os
3 |
4 | import json
5 | import pytest
6 | from modelscope_agent.tools.dashscope_tools.image_enhancement import \
7 |     ImageEnhancement
8 |
9 | IS_FORKED_PR = os.getenv('IS_FORKED_PR', 'false') == 'true'
10 |
11 |
12 | @pytest.mark.skipif(IS_FORKED_PR, reason='only run modelscope-agent main repo')
13 | def test_image_enhancement():
14 |     image_url = 'luoli15.jpg'
15 |     kwargs = {'input.image_path': image_url, 'parameters.upscale': 2}
16 |     phantom = ImageEnhancement()
17 |     res = phantom.call(json.dumps(kwargs))
18 |     assert (res.startswith('![IMAGEGEN](http'))
19 |
20 |
21 | @pytest.mark.skipif(IS_FORKED_PR, reason='only run modelscope-agent main repo')
22 | def test_image_enhancement_agent():
23 |     role_template = '你扮演一个绘画家,用尽可能丰富的描述调用工具绘制各种风格的图画。'
24 |
25 |     llm_config = {'model': 'qwen-max', 'model_server': 'dashscope'}
26 |
27 |     # names of the tools to enable
28 |     function_list = ['image_enhancement']
29 |
30 |     bot = RolePlay(
31 |         function_list=function_list, llm=llm_config, instruction=role_template)
32 |
33 |     response = bot.run('[上传文件 "luoli15.jpg"], 2倍超分这张图')
34 |     text = ''
35 |     for chunk in response:
36 |         text += chunk
37 |     print(text)
38 |     assert isinstance(text, str)
39 |
--------------------------------------------------------------------------------
/tests/tools/test_image_gen.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import pytest
4 | from modelscope_agent.agents.role_play import RolePlay
5 | from modelscope_agent.tools.dashscope_tools.image_generation import \
6 |     TextToImageTool
7 |
8 | IS_FORKED_PR = os.getenv('IS_FORKED_PR', 'false') == 'true'
9 |
10 |
11 | @pytest.mark.skipif(IS_FORKED_PR, reason='only run modelscope-agent main repo')
12 | def test_image_gen():
13 |     params = """{'text': '画一只小猫', 'resolution': '1024*1024'}"""
14 |
15 |     t2i = TextToImageTool()
16 |     res = t2i.call(params)
17 |     assert (res.startswith('![IMAGEGEN]('))
18 |
19 |
20 | @pytest.mark.skipif(IS_FORKED_PR, reason='only run modelscope-agent main repo')
21 | def test_image_gen_wrong_resolution():
22 |     params = """{'text': '画一只小猫', 'resolution': '1024'}"""
23 |
24 |     t2i = TextToImageTool()
25 |     res = t2i.call(params)
26 |     assert (res.startswith('![IMAGEGEN]('))
27 |
28 |
29 | @pytest.mark.skipif(IS_FORKED_PR, reason='only run modelscope-agent main repo')
30 | def test_image_gen_with_lora():
31 |     params = """{'text': '画一只小猫', 'resolution': '1024*1024', 'lora_index': 'wanx1.4.5_textlora_huiben2_20240518'}"""
32 |     t2i = TextToImageTool()
33 |     res = t2i.call(params)
34 |     assert (res.startswith('![IMAGEGEN]('))
35 |
36 |
37 | @pytest.mark.skipif(IS_FORKED_PR, reason='only run modelscope-agent main repo')
38 | def test_image_gen_role():
39 |     role_template = '你扮演一个画家,用尽可能丰富的描述调用工具绘制图像。'
40 |
41 |     llm_config = {'model': 'qwen-max', 'model_server': 'dashscope'}
42 |
43 |     # names of the tools to enable
44 |     function_list = ['image_gen']
45 |
46 |     bot = RolePlay(
47 |         function_list=function_list, llm=llm_config, instruction=role_template)
48 |
49 |     response = bot.run('画一张猫的图像')
50 |
51 |     text = ''
52 |     for chunk in response:
53 |         text += chunk
54 |     print(text)
55 |     assert isinstance(text, str)
56 |
--------------------------------------------------------------------------------
/tests/tools/test_image_gen_lite.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import pytest
4 | from modelscope_agent.agents.role_play import RolePlay
5 | from modelscope_agent.tools.dashscope_tools.image_generation_lite import \
6 |     TextToImageLiteTool
7 |
8 |
9 | @pytest.mark.skip(reason='only run with authorized keys')
10 | def test_image_gen_lite():
11 |     params = """
12 |     {'text': '一只可爱的小兔子正在花园里努力地拔一个大萝卜,周围是绿油油的草地和鲜艳的花朵,天空是清澈的蓝色,太阳公公笑眯眯地看着。',
13 |     'lora_index': 'wanxlite1.4.5_lora_huibenlite1_20240519',
14 |     'resolution': '1024*1024'
15 |     }
16 |     """
17 |     t2i = TextToImageLiteTool()
18 |     res = t2i.call(params)
19 |     assert (res.startswith('![IMAGEGEN]('))
20 |
21 |
22 | @pytest.mark.skip(reason='only run with authorized keys')
23 | def test_image_gen_lite_role():
24 |     role_template = '扮演一个绘本小助手,可以利用工具来创建符合儿童的童话绘本图片'
25 |
26 |     llm_config = {'model': 'qwen-max', 'model_server': 'dashscope'}
27 |
28 |     # names of the tools to enable
29 |     function_list = ['image_gen_lite']
30 |
31 |     bot = RolePlay(
32 |         function_list=function_list, llm=llm_config, instruction=role_template)
33 |
34 |     response = bot.run('绘制一个小兔子拔萝卜的场景,使用lora来控制风格')
35 |     text = ''
36 |     for chunk in response:
37 |         text += chunk
38 |     assert isinstance(text, str)
39 |
--------------------------------------------------------------------------------
/tests/tools/test_langchain_tool.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from langchain_community.tools import ShellTool
3 | from modelscope_agent.tools.langchain_proxy_tool import LangchainTool
4 |
5 |
6 | def test_is_langchain_tool():
7 |     # the tool should only be initialized with a langchain tool instance
8 |     with pytest.raises(ValueError) as e:
9 |         LangchainTool('mock_tool')
10 |     exec_msg = e.value.args[0]
11 |     assert (exec_msg == 'langchain_tool should be type of langchain tool')
12 |
13 |
14 | def test_run_langchain_tool():
15 |     # test running a langchain tool
16 |     shell_tool = LangchainTool(ShellTool())
17 |     tool_input = """{'commands': ["echo 'Hello World!'"]}"""
18 |     res = shell_tool.call(tool_input)
19 |     print(res)
20 |     assert res == 'Hello World!\n'
21 |
--------------------------------------------------------------------------------
/tests/tools/test_service_proxy.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import pytest
4 | from modelscope_agent.tools.base import ToolServiceProxy
5 |
6 | IN_GITHUB_ACTIONS = os.getenv('GITHUB_ACTIONS') == 'true'
7 |
8 |
9 | @pytest.mark.skipif(IN_GITHUB_ACTIONS, reason='no need to run this test on ci')
10 | def test_tool_service():
11 |     try:
12 |         tool_service = ToolServiceProxy('RenewInstance', {'test': 'xxx'})
13 |
14 |         result = tool_service.call(
15 |             "{\"instance_id\": 123, \"period\": \"mon\"}")
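        # ToolServiceProxy is expected to register 'RenewInstance' with the
        # local tool manager service and forward the call to it over HTTP;
        # the echoed confirmation string is asserted below (behavior inferred
        # from this test, not verified against the proxy implementation).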
16 |     except Exception as e:
17 |         assert False, f'Failed to initialize tool service with error: {e}'
18 |
19 |     assert result == "{'result': '已完成ECS实例ID为123的续费,续费时长mon月'}"
20 |
--------------------------------------------------------------------------------
/tests/tools/test_too_output_wrapper.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import numpy as np
4 | from modelscope_agent.tools.utils.output_wrapper import (AudioWrapper,
5 |                                                          ImageWrapper,
6 |                                                          OutputWrapper,
7 |                                                          VideoWrapper)
8 | from PIL import Image
9 |
10 |
11 | def test_audio_wrapper():
12 |     audio = b'a binary audio sequence'
13 |     audio = AudioWrapper(audio)
14 |
15 |     assert isinstance(audio.raw_data, bytes)
16 |     assert os.path.exists(audio.path)
17 |
18 |
19 | def test_image_wrapper():
20 |     # generate a random image as a numpy array
21 |     img = np.random.randint(0, 255, size=(100, 100, 3), dtype=np.uint8)
22 |     img = ImageWrapper(img)
23 |
24 |     assert isinstance(img.raw_data, Image.Image)
25 |     assert os.path.exists(img.path)
26 |
27 |
28 | def test_video_wrapper():
29 |     # TODO: make sure input, output follow the expectation
30 |     pass
31 |
--------------------------------------------------------------------------------
/tests/tools/test_weather.py:
--------------------------------------------------------------------------------
1 | from modelscope_agent.agent import Agent  # NOQA
2 |
3 | import os
4 |
5 | import pytest
6 |
7 | IS_FORKED_PR = os.getenv('IS_FORKED_PR', 'false') == 'true'
8 |
9 |
10 | @pytest.mark.skipif(IS_FORKED_PR, reason='only run modelscope-agent main repo')
11 | def test_weather_role():
12 |     llm_config = {
13 |         'model': 'Qwen/Qwen2.5-72B-Instruct',
14 |         'model_server': 'openai',
15 |         'api_base': 'https://api-inference.modelscope.cn/v1/',
16 |         'api_key': os.getenv('MODELSCOPE_API_KEY')
17 |     }
18 |
19 |     # MCP server endpoints used by the agent
20 |     mcp_servers = {
21 |         'mcpServers': {
22 |             'time': {
23 |                 'type':
24 |                 'sse',
25 |                 'url':
26 |                 'https://agenttor-mod-dd-cbwtrtihpn.cn-zhangjiakou.fcapp.run/sse'
27 |             },
28 |             'fetch': {
29 |                 'type':
30 |                 'sse',
31 |                 'url':
32 |                 'https://mcp-cdb79f47-15a7-4a72.api-inference.modelscope.cn/sse'
33 |             }
34 |         }
35 |     }
36 |
37 |     default_system = (
38 |         'You are an assistant which helps me to finish a complex job. Tools may be given to you '

    default_system = (
        'You are an assistant which helps me to finish a complex job. Tools may be given to you '
        'and you must choose some of them one per round to finish my request.')
    bot = Agent(mcp=mcp_servers, llm=llm_config, instruction=default_system)

    # '今天是哪一天?今天热门人工智能新闻有哪些?' = 'What day is it today? What is
    # today's trending AI news?'
    response = bot.run('今天是哪一天?今天热门人工智能新闻有哪些?')

    text = ''
    for chunk in response:
        text += chunk
    print(text)
    assert isinstance(text, str)


if __name__ == '__main__':
    test_weather_role()
--------------------------------------------------------------------------------
/tests/tools/test_web_browsing.py:
--------------------------------------------------------------------------------
from modelscope_agent.tools.web_browser import WebBrowser


def test_web_browser():
    # browse a single url; the result is truncated to the default max
    # browser length (2000 characters here)
    params = """{'urls': 'https://blog.sina.com.cn/zhangwuchang'}"""
    web_browser = WebBrowser()
    res = web_browser.call(params=params)

    assert isinstance(res, str)
    assert len(res) == 2000
    assert '张五常' in res


def test_web_browser_with_length():
    # browse with an explicit max_browser_length and expect truncation
    params = """{'urls': 'https://blog.sina.com.cn/zhangwuchang'}"""
    web_browser = WebBrowser()
    res = web_browser.call(params=params, max_browser_length=100)

    assert isinstance(res, str)
    assert len(res) == 100


def test_web_browser_with_adv():
    # advanced mode enabled via tool_config
    params = """{'urls': ['https://www.bing.com/search?q=拜登当选总统的年份是哪一年']}"""
    tool_config = {'web_browser': {'use_adv': True}}
    web_browser = WebBrowser(tool_config)
    res = web_browser.call(params=params)
    assert isinstance(res, str)


# def test_integrated_web_browser_agent():
#     llm = MockLLM('')
#
#     tools = {'web_browser': WebBrowser()}
#     prompt_generator = MockPromptGenerator()
#     url = 'https://blog.sina.com.cn/zhangwuchang'
#     action_parser = MockOutParser('web_browser', {'urls': [url]})
#     agent = AgentExecutor(
#         llm,
#         additional_tool_list=tools,
#         prompt_generator=prompt_generator,
#         action_parser=action_parser,
#         tool_retrieval=False,
#     )
#     res = agent.run('please search the information about zhangwuchang')
#     print(res)
#     assert url == res[0]['result'][0]['url']
#     assert '张五常' in res[0]['result'][0]['content']
--------------------------------------------------------------------------------
/tests/tools/test_web_search.py:
--------------------------------------------------------------------------------
import os

import json
import pytest
from modelscope_agent.tools.web_search.web_search import WebSearch

IS_FORKED_PR = os.getenv('IS_FORKED_PR', 'false') == 'true'


@pytest.mark.skipif(IS_FORKED_PR, reason='only run modelscope-agent main repo')
def test_web_search():
    # query (Chinese): 'New Year's Day 2024, Harbin, weather'
    input_params = """{'query': '2024元旦 哈尔滨 天气'}"""
    web_searcher = WebSearch()
    res = web_searcher.call(input_params)

    assert isinstance(res, str)
    json_res = json.loads(res)
    for item in json_res:
        assert item['link'] or item['sniper']
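

# NOTE: the commented-out test below appears to target the older
# AgentExecutor-style API (MockLLM, MockPromptGenerator) and is kept
# for reference only.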
# def test_web_search_agent():
#     responses = [
#         "<|startofthink|>{\"api_name\": \"web_search_utils\", \"parameters\": "
#         "{\"query\": \"2024元旦 哈尔滨 天气\"}}<|endofthink|>", 'summarize'
#     ]
#     llm = MockLLM(responses)
#
#     tools = {'web_search_utils': WebSearch()}
#     prompt_generator = MockPromptGenerator()
#     action_parser = MockOutParser('web_search_utils',
#                                   {'query': '2024元旦 哈尔滨 天气'})
#
#     agent = AgentExecutor(
#         llm,
#         additional_tool_list=tools,
#         prompt_generator=prompt_generator,
#         action_parser=action_parser,
#         tool_retrieval=False,
#     )
#     res = agent.run('帮我查询2024年元旦时哈尔滨天气情况')
#     print(res)
#
#     for item in res[0]['result']:
#         assert item['link'] and item['sniper']
--------------------------------------------------------------------------------
/tests/tools/test_wordart_tool.py:
--------------------------------------------------------------------------------
import os

import pytest
from modelscope_agent.tools.dashscope_tools.wordart_tool import WordArtTexture

from modelscope_agent.agents.role_play import RolePlay  # NOQA

IS_FORKED_PR = os.getenv('IS_FORKED_PR', 'false') == 'true'


@pytest.mark.skipif(IS_FORKED_PR, reason='only run modelscope-agent main repo')
def test_word_art():
    # render the text '魔搭社区' (ModelScope community) with a 'scene' texture:
    # 'a green forest with small flowers in bloom'
    params = """{
        'input.text.text_content': '魔搭社区',
        'input.prompt': '一片绿色的森林里开着小花',
        'input.texture_style': 'scene',
        'input.text.output_image_ratio': '9:16'
    }"""
    wa = WordArtTexture()
    res = wa.call(params)
    print(res)
    assert res.startswith('![IMAGEGEN](https://')


@pytest.mark.skipif(IS_FORKED_PR, reason='only run modelscope-agent main repo')
def test_word_art_role():
    # role template (Chinese): 'you play an art teacher; call the tool with
    # descriptions as rich as possible to generate word-art images'
    role_template = '你扮演一个美术老师,用尽可能丰富的描述调用工具生成艺术字图片。'

    llm_config = {'model': 'qwen-max', 'model_server': 'dashscope'}

    # input tool args
    function_list = ['wordart_texture_generation']

    bot = RolePlay(
        function_list=function_list, llm=llm_config, instruction=role_template)

    # request: text '你好新年' (hello new year), style: ocean, texture: default,
    # aspect ratio: 16:9
    response = bot.run('文字内容:你好新年,风格:海洋,纹理风格:默认,宽高比:16:9')
    text = ''
    for chunk in response:
        text += chunk
    print(text)
    assert isinstance(text, str)
--------------------------------------------------------------------------------
/tests/ut_utils.py:
--------------------------------------------------------------------------------
import subprocess

from modelscope_agent.tools.base import BaseTool


def is_docker_daemon_running():
    try:
        # run 'docker info' to check whether the Docker daemon is reachable
        result = subprocess.run(['docker', 'info'],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                check=True)
        print(result.stdout.decode('utf-8'))
        # 'docker info' ran successfully, so the daemon is running
        return True
    except subprocess.CalledProcessError:
        # 'docker info' failed: the daemon is not running or not reachable
        return False
    except FileNotFoundError:
        # the 'docker' command is not on PATH, so Docker is likely not installed
        return False


class MockTool(BaseTool):
    name: str = 'mock_tool'
    description: str = 'description'
    parameters: list = [{
        'name': 'test',
        'type': 'string',
        'description': 'test variable',
        'required': False
    }]

    def call(self, params: str, **kwargs):
        return params
--------------------------------------------------------------------------------
/tests/utils.py:
--------------------------------------------------------------------------------
from modelscope_agent.tools.base import BaseTool


class MockTool(BaseTool):
    name: str = 'mock_tool'
    description: str = 'description'
    parameters: list = [{
        'name': 'test',
        'type': 'string',
        'description': 'test variable',
        'required': False
    }]
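
    # call() simply echoes the raw params back, so tests can assert on
    # exactly what the agent handed to the tool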
    def call(self, params: str, **kwargs):
        return params
--------------------------------------------------------------------------------
/tests/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/modelscope/modelscope-agent/4238cf9da5b7a7e2e5819794d8e5d5648dc33c56/tests/utils/__init__.py
--------------------------------------------------------------------------------
/tests/utils/test_git_clone.py:
--------------------------------------------------------------------------------
import os
import shutil
import tempfile

import pytest
from modelscope_agent.utils.git import clone_git_repository


@pytest.fixture(scope='function')
def temp_dir():
    # create a temp dir
    temp_directory = tempfile.mkdtemp()
    yield temp_directory
    # delete the temp dir after the test
    shutil.rmtree(temp_directory)


def test_clone_git_repository_success(temp_dir):
    # use the temp dir as the target folder
    repo_url = 'http://www.modelscope.cn/studios/zhicheng/zzc_tool_test.git'
    branch_name = 'master'
    folder_name = temp_dir

    # clone the repository into the local dir
    clone_git_repository(repo_url, branch_name, folder_name)

    # check that the clone succeeded
    assert os.listdir(
        folder_name) != [], 'Directory should not be empty after cloning'


def test_clone_git_repository_failed(temp_dir):
    # this repository url does not exist, so cloning should raise
    repo_url = 'http://www.modelscope.cn/studios/zhicheng/zzc_tool_test1.git'
    branch_name = 'master'
    folder_name = temp_dir

    with pytest.raises(RuntimeError):
        clone_git_repository(repo_url, branch_name, folder_name)

    # nothing should have been cloned into the folder
    assert os.listdir(
        folder_name) == [], 'Directory should be empty after a failed clone'
--------------------------------------------------------------------------------
/tests/utils/test_token_count.py:
--------------------------------------------------------------------------------
import os

import pytest
from modelscope_agent.memory.memory_with_retrieval_knowledge import \
    MemoryWithRetrievalKnowledge
from modelscope_agent.schemas import Message

current_file_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.dirname(current_file_dir)


@pytest.fixture
def temporary_storage(tmpdir):
    # use a temporary directory as the test storage
    return str(tmpdir.mkdir('knowledge_vector_test'))


def test_memory_with_retrieval_knowledge(temporary_storage):
    random_name = 'test_memory_agent'

    memory = MemoryWithRetrievalKnowledge(
        storage_path=temporary_storage,
        name=random_name,
        memory_path=temporary_storage,
    )
    # 1. test get history token count
    history_token_count_1 = memory.get_history_token_count()
    assert isinstance(history_token_count_1, int)

    # 2. test update history
    msg = [
        Message(role='system', content='test token counting'),
        Message(role='user', content='test token counting'),
    ]
    memory.update_history(msg)
    history_token_count_2 = memory.get_history_token_count()
    assert isinstance(history_token_count_2, int)
    assert history_token_count_2 > history_token_count_1
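
    # steps 3 and 4 below shrink the history and expect the token count to
    # drop accordingly, reaching zero once the history is cleared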
    # 3. test pop history
    memory.pop_history()
    history_token_count_3 = memory.get_history_token_count()
    assert isinstance(history_token_count_3, int)
    assert history_token_count_2 > history_token_count_3 > 0

    # 4. test clear history
    memory.clear_history()
    history_token_count_4 = memory.get_history_token_count()
    assert isinstance(history_token_count_4, int)
    assert history_token_count_4 == 0
--------------------------------------------------------------------------------