├── .DS_Store
├── .dockerignore
├── .env.example
├── .gitignore
├── .pre-commit-config.yaml
├── .pylintrc
├── Dockerfile
├── LICENSE
├── README.md
├── README_zh-CN.md
├── assets
│   ├── logo.svg
│   ├── mindsearch_openset.png
│   └── teaser.gif
├── backend_example.py
├── docker
│   ├── README.md
│   ├── README_zh-CN.md
│   ├── msdl
│   │   ├── __init__.py
│   │   ├── __main__.py
│   │   ├── config.py
│   │   ├── docker_manager.py
│   │   ├── i18n.py
│   │   ├── templates
│   │   │   ├── backend
│   │   │   │   ├── cloud_llm.dockerfile
│   │   │   │   └── local_llm.dockerfile
│   │   │   ├── docker-compose.yaml
│   │   │   └── frontend
│   │   │       └── react.dockerfile
│   │   ├── translations
│   │   │   ├── en.yaml
│   │   │   └── zh_CN.yaml
│   │   ├── user_interaction.py
│   │   └── utils.py
│   └── setup.py
├── frontend
│   ├── React
│   │   ├── .gitignore
│   │   ├── .prettierignore
│   │   ├── .prettierrc.json
│   │   ├── README.md
│   │   ├── README_zh-CN.md
│   │   ├── index.html
│   │   ├── package-lock.json
│   │   ├── package.json
│   │   ├── src
│   │   │   ├── App.module.less
│   │   │   ├── App.tsx
│   │   │   ├── assets
│   │   │   │   ├── background.png
│   │   │   │   ├── fold-icon.svg
│   │   │   │   ├── logo.svg
│   │   │   │   ├── pack-up.svg
│   │   │   │   ├── sendIcon.svg
│   │   │   │   ├── show-right-icon.png
│   │   │   │   └── unflod-icon.svg
│   │   │   ├── global.d.ts
│   │   │   ├── index.less
│   │   │   ├── index.tsx
│   │   │   ├── pages
│   │   │   │   └── mindsearch
│   │   │   │       ├── assets
│   │   │   │       │   ├── bookmark-icon.svg
│   │   │   │       │   ├── empty-chat-right.svg
│   │   │   │       │   ├── fold-icon.svg
│   │   │   │       │   ├── logo.svg
│   │   │   │       │   ├── logo1.svg
│   │   │   │       │   ├── mindsearch-avatar.svg
│   │   │   │       │   ├── pack-up-disabled.svg
│   │   │   │       │   ├── pack-up.svg
│   │   │   │       │   ├── sendIcon.svg
│   │   │   │       │   ├── think-progress-icon.svg
│   │   │   │       │   └── unflod-icon.svg
│   │   │   │       ├── components
│   │   │   │       │   ├── answer
│   │   │   │       │   │   ├── index.module.less
│   │   │   │       │   │   ├── index.tsx
│   │   │   │       │   │   └── loading-animation
│   │   │   │       │   │       ├── index.module.less
│   │   │   │       │   │       └── index.tsx
│   │   │   │       │   ├── chat-right
│   │   │   │       │   │   ├── components
│   │   │   │       │   │   │   ├── empty-placeholder
│   │   │   │       │   │   │   │   ├── index.module.less
│   │   │   │       │   │   │   │   └── index.tsx
│   │   │   │       │   │   │   ├── query-item
│   │   │   │       │   │   │   │   ├── index.module.less
│   │   │   │       │   │   │   │   └── index.tsx
│   │   │   │       │   │   │   └── search-item
│   │   │   │       │   │   │       ├── index.module.less
│   │   │   │       │   │   │       └── index.tsx
│   │   │   │       │   │   ├── index.module.less
│   │   │   │       │   │   └── index.tsx
│   │   │   │       │   ├── custom-markdown
│   │   │   │       │   │   ├── index.module.less
│   │   │   │       │   │   └── index.tsx
│   │   │   │       │   ├── iconfont
│   │   │   │       │   │   └── index.tsx
│   │   │   │       │   ├── loading
│   │   │   │       │   │   ├── index.module.less
│   │   │   │       │   │   └── index.tsx
│   │   │   │       │   ├── mind-map-item
│   │   │   │       │   │   ├── index.module.less
│   │   │   │       │   │   └── index.tsx
│   │   │   │       │   ├── mind-map
│   │   │   │       │   │   ├── index.module.less
│   │   │   │       │   │   └── index.tsx
│   │   │   │       │   ├── notice
│   │   │   │       │   │   ├── index.module.less
│   │   │   │       │   │   └── index.tsx
│   │   │   │       │   └── session-item
│   │   │   │       │       ├── index.module.less
│   │   │   │       │       └── index.tsx
│   │   │   │       ├── index.module.less
│   │   │   │       ├── index.tsx
│   │   │   │       ├── provider
│   │   │   │       │   └── context.tsx
│   │   │   │       └── utils
│   │   │   │           └── tools.ts
│   │   │   ├── routes
│   │   │   │   └── routes.tsx
│   │   │   ├── styles
│   │   │   │   ├── fn.less
│   │   │   │   └── var.less
│   │   │   └── vite-env.d.ts
│   │   ├── tsconfig.json
│   │   └── vite.config.ts
│   ├── css
│   │   └── gradio_front.css
│   ├── gradio_agentchatbot
│   │   ├── __init__.py
│   │   ├── agentchatbot.py
│   │   ├── chat_interface.py
│   │   ├── templates
│   │   │   └── component
│   │   │       ├── assets
│   │   │       │   └── worker-lPYB70QI.js
│   │   │       ├── index.js
│   │   │       └── style.css
│   │   └── utils.py
│   ├── mindsearch_gradio.py
│   └── mindsearch_streamlit.py
├── mindsearch
│   ├── __init__.py
│   ├── agent
│   │   ├── __init__.py
│   │   ├── graph.py
│   │   ├── mindsearch_agent.py
│   │   ├── mindsearch_prompt.py
│   │   ├── models.py
│   │   └── streaming.py
│   ├── app.py
│   └── terminal.py
└── requirements.txt
/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/InternLM/MindSearch/bfb41be176f95700bcf199be69b001096425bbbd/.DS_Store
--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
1 | **/node_modules
2 | **/dist
3 | **/.git
4 | **/.gitignore
5 | **/.vscode
6 | **/README.md
7 | **/LICENSE
8 | **/.env
9 | **/npm-debug.log
10 | **/yarn-debug.log
11 | **/yarn-error.log
12 | **/.pnpm-debug.log
13 |
--------------------------------------------------------------------------------
/.env.example:
--------------------------------------------------------------------------------
1 | OPENAI_API_KEY=
2 | OPENAI_API_BASE=
3 | OPENAI_MODEL=
4 | SILICON_API_KEY=
5 | SILICON_MODEL=
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[ciod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 |
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 |
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 |
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .nox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | *.py,cover
50 | .hypothesis/
51 | .pytest_cache/
52 | cover/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | .pybuilder/
76 | target/
77 |
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 |
81 | # IPython
82 | profile_default/
83 | ipython_config.py
84 |
85 | # pyenv
86 | # For a library or package, you might want to ignore these files since the code is
87 | # intended to run in multiple environments; otherwise, check them in:
88 | # .python-version
89 |
90 | # pipenv
91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
94 | # install all needed dependencies.
95 | #Pipfile.lock
96 |
97 | # poetry
98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99 | # This is especially recommended for binary packages to ensure reproducibility, and is more
100 | # commonly ignored for libraries.
101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102 | #poetry.lock
103 |
104 | # pdm
105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
106 | #pdm.lock
107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
108 | # in version control.
109 | # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
110 | .pdm.toml
111 | .pdm-python
112 | .pdm-build/
113 |
114 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
115 | __pypackages__/
116 |
117 | # Celery stuff
118 | celerybeat-schedule
119 | celerybeat.pid
120 |
121 | # SageMath parsed files
122 | *.sage.py
123 |
124 | # Environments
125 | .env
126 | .venv
127 | env/
128 | venv/
129 | ENV/
130 | env.bak/
131 | venv.bak/
132 |
133 | # Spyder project settings
134 | .spyderproject
135 | .spyproject
136 |
137 | # Rope project settings
138 | .ropeproject
139 |
140 | # mkdocs documentation
141 | /site
142 |
143 | # mypy
144 | .mypy_cache/
145 | .dmypy.json
146 | dmypy.json
147 |
148 | # Pyre type checker
149 | .pyre/
150 |
151 | # pytype static type analyzer
152 | .pytype/
153 |
154 | # Cython debug symbols
155 | cython_debug/
156 |
157 | # PyCharm
158 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
159 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
160 | # and can be added to the global gitignore or merged into this file. For a more nuclear
161 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
162 | #.idea/
163 |
164 | .env
165 | temp
166 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | exclude: ^(tests/data|scripts|frontend/React)/
2 | repos:
3 | - repo: https://github.com/PyCQA/flake8
4 | rev: 7.0.0
5 | hooks:
6 | - id: flake8
7 | args: ["--max-line-length=120"]
8 | - repo: https://github.com/PyCQA/isort
9 | rev: 5.13.2
10 | hooks:
11 | - id: isort
12 | - repo: https://github.com/pre-commit/mirrors-yapf
13 | rev: v0.32.0
14 | hooks:
15 | - id: yapf
16 | - repo: https://github.com/pre-commit/pre-commit-hooks
17 | rev: v4.5.0
18 | hooks:
19 | - id: trailing-whitespace
20 | - id: check-yaml
21 | - id: end-of-file-fixer
22 | - id: requirements-txt-fixer
23 | - id: double-quote-string-fixer
24 | - id: check-merge-conflict
25 | - id: fix-encoding-pragma
26 | args: ["--remove"]
27 | - id: mixed-line-ending
28 | args: ["--fix=lf"]
29 | - repo: https://github.com/executablebooks/mdformat
30 | rev: 0.7.17
31 | hooks:
32 | - id: mdformat
33 | args: ["--number"]
34 | additional_dependencies:
35 | - mdformat-openmmlab
36 | - mdformat_frontmatter
37 | - linkify-it-py
38 | - repo: https://github.com/codespell-project/codespell
39 | rev: v2.2.6
40 | hooks:
41 | - id: codespell
42 | - repo: https://github.com/asottile/pyupgrade
43 | rev: v3.15.0
44 | hooks:
45 | - id: pyupgrade
46 | args: ["--py36-plus"]
47 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM continuumio/miniconda3
2 |
3 | ARG OPENAI_API_KEY
4 | ENV OPENAI_API_KEY=${OPENAI_API_KEY}
5 |
6 | ARG BING_API_KEY
7 | ENV BING_API_KEY=${BING_API_KEY}
8 |
9 | # Set environment variables
10 | ENV PATH=/opt/conda/bin:$PATH
11 |
12 | # Clone the git repository
13 | RUN git clone https://github.com/InternLM/MindSearch.git /app
14 |
15 | WORKDIR /app
16 |
17 | # Create the fastapi conda environment and install the dependencies
18 | RUN conda create --name fastapi python=3.10 -y && \
19 | conda run -n fastapi pip install -r requirements.txt && \
20 | conda clean --all -f -y
21 |
22 | # Expose the FastAPI port (the app below listens on 8002)
23 | EXPOSE 8002
24 |
25 | # Start the FastAPI service
26 | ENTRYPOINT ["conda", "run", "--no-capture-output", "-n", "fastapi"]
27 | CMD ["python3", "-m", "mindsearch.app", "--asy", "--host", "0.0.0.0", "--port", "8002"]
28 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 | [📃 Paper](https://arxiv.org/abs/2407.20183) | [💻 Demo](https://internlm-chat.intern-ai.org.cn/)
8 |
9 | English | [简体中文](README_zh-CN.md)
10 |
11 |
12 |
13 |
14 |
15 |
16 | ## ✨ MindSearch: Mimicking Human Minds Elicits Deep AI Searcher
17 |
18 | ## 📅 Changelog
19 |
20 | - 2024/11/05: 🥳 MindSearch is now deployed on Puyu! 👉 [Try it](https://internlm-chat.intern-ai.org.cn/) 👈
21 |   - Refactored the agent module based on [Lagent v0.5](https://github.com/InternLM/lagent) for better concurrency performance.
22 |   - Improved the UI to embody the simultaneous multi-query search.
23 |
24 |
25 | ## ⚽️ Build Your Own MindSearch
26 |
27 | ### Step1: Dependencies Installation
28 |
29 | ```bash
30 | git clone https://github.com/InternLM/MindSearch
31 | cd MindSearch
32 | pip install -r requirements.txt
33 | ```
34 |
35 | ### Step2: Setup Environment Variables
36 |
37 | Before setting up the API, you need to configure environment variables. Rename the `.env.example` file to `.env` and fill in the required values.
38 |
39 | ```bash
40 | mv .env.example .env
41 | # Open .env and add your keys and model configurations
42 | ```
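
For reference, a filled-in `.env` might look like the sketch below. The variable names come from `.env.example`; the values are placeholders, and you only need to set the ones your chosen model format actually reads (for example, the `SILICON_*` variables can stay empty when using an OpenAI-compatible backend).

```bash
# .env — placeholder values for illustration only
OPENAI_API_KEY=sk-xxxx
OPENAI_API_BASE=https://api.openai.com/v1
OPENAI_MODEL=your-model-name
SILICON_API_KEY=
SILICON_MODEL=
```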
43 |
44 | ### Step3: Setup MindSearch API
45 |
46 | Set up the FastAPI server.
47 |
48 | ```bash
49 | python -m mindsearch.app --lang en --model_format internlm_server --search_engine DuckDuckGoSearch --asy
50 | ```
51 |
52 | - `--lang`: language of the model, `en` for English and `cn` for Chinese.
53 | - `--model_format`: format of the model.
54 |   - `internlm_server` for InternLM2.5-7b-chat with a local server. (InternLM2.5-7b-chat has been better optimized for Chinese.)
55 |   - `gpt4` for GPT-4.
56 |     If you want to use other models, please modify [models](./mindsearch/agent/models.py).
57 | - `--search_engine`: search engine.
58 |   - `DuckDuckGoSearch` for the DuckDuckGo search engine.
59 |   - `BingSearch` for the Bing search engine.
60 |   - `BraveSearch` for the Brave web search API.
61 |   - `GoogleSearch` for the Google Serper web search API.
62 |   - `TencentSearch` for the Tencent web search API.
63 |
64 |   Please set your web search API key as the `WEB_SEARCH_API_KEY` environment variable unless you are using `DuckDuckGoSearch` (which needs no key) or `TencentSearch`, which instead requires the secret ID as `TENCENT_SEARCH_SECRET_ID` and the secret key as `TENCENT_SEARCH_SECRET_KEY`; see the example after this list.
65 | - `--asy`: deploy asynchronous agents.
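
For instance, a hypothetical launch using GPT-4 together with Bing search might look like the sketch below. The variable names follow `.env.example` and the note above; adjust them to your own provider and keys.

```bash
export OPENAI_API_KEY="sk-xxxx"            # read by the gpt4 model format
export WEB_SEARCH_API_KEY="your-bing-key"  # read by BingSearch
python -m mindsearch.app --lang en --model_format gpt4 --search_engine BingSearch --asy
```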
66 |
67 | ### Step4: Setup MindSearch Frontend
68 |
69 | The following frontend interfaces are provided:
70 |
71 | - React
72 |
73 | First, configure the backend URL for the Vite proxy.
74 |
75 | ```bash
76 | HOST="127.0.0.1" # modify as you need
77 | PORT=8002
78 | sed -i -r "s/target:\s*\"\"/target: \"${HOST}:${PORT}\"/" frontend/React/vite.config.ts
79 | ```
80 |
81 | ```bash
82 | # Install Node.js and npm
83 | # for Ubuntu
84 | sudo apt install nodejs npm
85 |
86 | # for windows
87 | # download from https://nodejs.org/zh-cn/download/prebuilt-installer
88 |
89 | # Install dependencies
90 |
91 | cd frontend/React
92 | npm install
93 | npm start
94 | ```
95 |
96 | Details can be found in [React](./frontend/React/README.md)
97 |
98 | - Gradio
99 |
100 | ```bash
101 | python frontend/mindsearch_gradio.py
102 | ```
103 |
104 | - Streamlit
105 |
106 | ```bash
107 | streamlit run frontend/mindsearch_streamlit.py
108 | ```
109 |
110 | ## 🌐 Change Web Search API
111 |
112 | To use a different type of web search API, modify the `searcher_type` attribute in the `searcher_cfg` located in `mindsearch/agent/__init__.py`. Currently supported web search APIs include:
113 |
114 | - `GoogleSearch`
115 | - `DuckDuckGoSearch`
116 | - `BraveSearch`
117 | - `BingSearch`
118 | - `TencentSearch`
119 |
120 | For example, to change to the Brave Search API, you would configure it as follows:
121 |
122 | ```python
123 | BingBrowser(
124 | searcher_type='BraveSearch',
125 | topk=2,
126 | api_key=os.environ.get('BRAVE_API_KEY', 'YOUR BRAVE API')
127 | )
128 | ```
129 |
130 | ## 🐞 Using the Backend Without Frontend
131 |
132 | If you prefer to interact with the backend directly, use the `backend_example.py` script. It demonstrates how to send a query to the backend and process the streaming response.
133 |
134 | ```bash
135 | python backend_example.py
136 | ```
137 |
138 | Make sure you have set up the environment variables and the backend is running before executing the script.
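
If you just want to poke at the API without Python, a rough equivalent with `curl` is sketched below. It assumes the backend is listening on the default port 8002 and streams the same `data: ...` events that `backend_example.py` parses; `-N` disables output buffering so the stream prints as it arrives.

```bash
curl -N -X POST http://localhost:8002/solve \
  -H "Content-Type: application/json" \
  -d '{"inputs": "What is the weather like today in New York?"}'
```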
139 |
140 | ## 🐞 Debug Locally
141 |
142 | ```bash
143 | python -m mindsearch.terminal
144 | ```
145 |
146 | ## 📝 License
147 |
148 | This project is released under the [Apache 2.0 license](LICENSE).
149 |
150 | ## Citation
151 |
152 | If you find this project useful in your research, please consider citing:
153 |
154 | ```
155 | @article{chen2024mindsearch,
156 | title={MindSearch: Mimicking Human Minds Elicits Deep AI Searcher},
157 | author={Chen, Zehui and Liu, Kuikun and Wang, Qiuchen and Liu, Jiangning and Zhang, Wenwei and Chen, Kai and Zhao, Feng},
158 | journal={arXiv preprint arXiv:2407.20183},
159 | year={2024}
160 | }
161 | ```
162 |
163 | ## Our Projects
164 |
165 | Explore our additional research on large language models, focusing on LLM agents.
166 |
167 | - [Lagent](https://github.com/InternLM/lagent): A lightweight framework for building LLM-based agents
168 | - [AgentFLAN](https://github.com/InternLM/Agent-FLAN): An innovative approach for constructing and training with high-quality agent datasets (ACL 2024 Findings)
169 | - [T-Eval](https://github.com/open-compass/T-Eval): A fine-grained tool utilization evaluation benchmark (ACL 2024)
170 |
--------------------------------------------------------------------------------
/README_zh-CN.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 | [📃 Paper](https://arxiv.org/abs/2407.20183) | [💻 浦语入口](https://internlm-chat.intern-ai.org.cn/)
8 |
9 | [English](README.md) | 简体中文
10 |
11 |
12 |
13 |
14 |
15 |
16 | ## ✨ MindSearch: Mimicking Human Minds Elicits Deep AI Searcher
17 |
18 | MindSearch 是一个开源的 AI 搜索引擎框架,具有与 Perplexity.ai Pro 相同的性能。您可以轻松部署它来构建您自己的搜索引擎,可以使用闭源 LLM(如 GPT、Claude)或开源 LLM([InternLM2.5 系列模型](https://huggingface.co/internlm/internlm2_5-7b-chat)经过专门优化,能够在 MindSearch 框架中提供卓越的性能;其他开源模型没做过具体测试)。其拥有以下特性:
19 |
20 | - 🤔 **任何想知道的问题**:MindSearch 通过搜索解决你在生活中遇到的各种问题
21 | - 📚 **深度知识探索**:MindSearch 通过数百网页的浏览,提供更广泛、深层次的答案
22 | - 🔍 **透明的解决方案路径**:MindSearch 提供了思考路径、搜索关键词等完整的内容,提高回复的可信度和可用性。
23 | - 💻 **多种用户界面**:为用户提供各种接口,包括 React、Gradio、Streamlit 和本地调试。根据需要选择任意类型。
24 | - 🧠 **动态图构建过程**:MindSearch 将用户查询分解为图中的子问题节点,并根据 WebSearcher 的搜索结果逐步扩展图。
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 | ## ⚡️ MindSearch VS 其他 AI 搜索引擎
33 |
34 | 在深度、广度和生成响应的准确性三个方面,对 ChatGPT-Web、Perplexity.ai(Pro)和 MindSearch 的表现进行比较。评估结果基于 100 个由人类专家精心设计的现实问题,并由 5 位专家进行评分\*。
35 |
36 |
37 |
38 |
39 | * 所有实验均在 2024 年 7 月 7 日之前完成。
40 |
41 | ## ⚽️ 构建您自己的 MindSearch
42 |
43 | ### 步骤1: 依赖安装
44 |
45 | ```bash
46 | pip install -r requirements.txt
47 | ```
48 |
49 | ### 步骤2: 启动 MindSearch API
50 |
51 | 启动 FastAPI 服务器
52 |
53 | ```bash
54 | python -m mindsearch.app --lang en --model_format internlm_server --search_engine DuckDuckGoSearch
55 | ```
56 |
57 | - `--lang`: 模型的语言,`en` 为英语,`cn` 为中文。
58 | - `--model_format`: 模型的格式。
59 | - `internlm_server` 为 InternLM2.5-7b-chat 本地服务器。
60 | - `gpt4` 为 GPT4。
61 | 如果您想使用其他模型,请修改 [models](./mindsearch/agent/models.py)
62 | - `--search_engine`: 搜索引擎。
63 | - `DuckDuckGoSearch` 为 DuckDuckGo 搜索引擎。
64 | - `BingSearch` 为 Bing 搜索引擎。
65 | - `BraveSearch` 为 Brave 搜索引擎。
66 | - `GoogleSearch` 为 Google Serper 搜索引擎。
67 | - `TencentSearch` 为 Tencent 搜索引擎。
68 |
69 | 请将 DuckDuckGo 和 Tencent 以外的网页搜索引擎 API 密钥设置为 `WEB_SEARCH_API_KEY` 环境变量。如果使用 DuckDuckGo,则无需设置;如果使用 Tencent,请设置 `TENCENT_SEARCH_SECRET_ID` 和 `TENCENT_SEARCH_SECRET_KEY`。
70 |
71 | ### 步骤3: 启动 MindSearch 前端
72 |
73 | 提供以下几种前端界面:
74 |
75 | - React
76 |
77 | 首先配置Vite的API代理,指定实际后端URL
78 |
79 | ```bash
80 | HOST="127.0.0.1"
81 | PORT=8002
82 | sed -i -r "s/target:\s*\"\"/target: \"${HOST}:${PORT}\"/" frontend/React/vite.config.ts
83 | ```
84 |
85 | ```bash
86 | # 安装 Node.js 和 npm
87 | # 对于 Ubuntu
88 | sudo apt install nodejs npm
89 | # 对于 Windows
90 | # 从 https://nodejs.org/zh-cn/download/prebuilt-installer 下载
91 |
92 | cd frontend/React
93 | npm install
94 | npm start
95 | ```
96 |
97 | 更多细节请参考 [React](./frontend/React/README.md)
98 |
99 | - Gradio
100 |
101 | ```bash
102 | python frontend/mindsearch_gradio.py
103 | ```
104 |
105 | - Streamlit
106 |
107 | ```bash
108 | streamlit run frontend/mindsearch_streamlit.py
109 | ```
110 |
111 | ## 🐞 本地调试
112 |
113 | ```bash
114 | python mindsearch/terminal.py
115 | ```
116 |
117 | ## 📝 许可证
118 |
119 | 该项目按照 [Apache 2.0 许可证](LICENSE) 发行。
120 |
121 | ## 学术引用
122 |
123 | 如果此项目对您的研究有帮助,请参考如下方式进行引用:
124 |
125 | ```
126 | @article{chen2024mindsearch,
127 | title={MindSearch: Mimicking Human Minds Elicits Deep AI Searcher},
128 | author={Chen, Zehui and Liu, Kuikun and Wang, Qiuchen and Liu, Jiangning and Zhang, Wenwei and Chen, Kai and Zhao, Feng},
129 | journal={arXiv preprint arXiv:2407.20183},
130 | year={2024}
131 | }
132 | ```
133 |
134 | ## 相关项目
135 |
136 | 关注我们其他在大语言模型上的一些探索,主要为LLM智能体方向。
137 |
138 | - [Lagent](https://github.com/InternLM/lagent): 一个轻便简洁的大语言模型智能体框架
139 | - [AgentFLAN](https://github.com/InternLM/Agent-FLAN): 一套构建高质量智能体语料和训练模型的方法 (ACL 2024 Findings)
140 | - [T-Eval](https://github.com/open-compass/T-Eval): 一个细粒度评估LLM调用工具能力的评测集 (ACL 2024)
141 |
--------------------------------------------------------------------------------
/assets/mindsearch_openset.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/InternLM/MindSearch/bfb41be176f95700bcf199be69b001096425bbbd/assets/mindsearch_openset.png
--------------------------------------------------------------------------------
/assets/teaser.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/InternLM/MindSearch/bfb41be176f95700bcf199be69b001096425bbbd/assets/teaser.gif
--------------------------------------------------------------------------------
/backend_example.py:
--------------------------------------------------------------------------------
1 | import json
2 |
3 | import requests
4 |
5 | # Define the backend URL
6 | url = "http://localhost:8002/solve"
7 | headers = {"Content-Type": "application/json"}
8 |
9 |
10 | # Function to send a query to the backend and get the response
11 | def get_response(query):
12 | # Prepare the input data
13 | data = {"inputs": query}
14 |
15 | # Send the request to the backend
16 | response = requests.post(url, headers=headers, data=json.dumps(data), timeout=20, stream=True)
17 |
18 | # Process the streaming response
19 | for chunk in response.iter_lines(chunk_size=8192, decode_unicode=False, delimiter=b"\n"):
20 | if chunk:
21 | decoded = chunk.decode("utf-8")
22 | if decoded == "\r":
23 | continue
24 | if decoded[:6] == "data: ":
25 | decoded = decoded[6:]
26 | elif decoded.startswith(": ping - "):
27 | continue
28 | response_data = json.loads(decoded)
29 | agent_return = response_data["response"]
30 | node_name = response_data["current_node"]
31 | print(f"Node: {node_name}, Response: {agent_return['response']}")
32 |
33 |
34 | # Example usage
35 | if __name__ == "__main__":
36 | query = "What is the weather like today in New York?"
37 | get_response(query)
38 |
--------------------------------------------------------------------------------
/docker/README.md:
--------------------------------------------------------------------------------
1 | # MSDL (MindSearch Docker Launcher) User Guide
2 |
3 | English | [简体中文](README_zh-CN.md)
4 |
5 | ## Introduction
6 |
7 | MSDL (MindSearch Docker Launcher) is a command-line tool designed to simplify the deployment process of MindSearch. It helps users configure and launch the Docker environment for MindSearch through an interactive interface, reducing the complexity of deployment. MSDL primarily serves as a scaffold for deploying containers and does not involve optimization of MindSearch's core logic.
8 |
9 | ## Prerequisites
10 |
11 | - Python 3.7 or higher
12 | - Docker (Docker Compose included; most newer Docker versions have it integrated)
13 | - Git (for cloning the repository)
14 | - Stable internet connection
15 | - Sufficient disk space (required space varies depending on the selected deployment option)
16 |
17 | ## Installation Steps
18 |
19 | 1. Clone the MindSearch repository:
20 | ```bash
21 | git clone https://github.com/InternLM/MindSearch.git # If you have already cloned the repository, you can skip this step.
22 | cd MindSearch/docker
23 | ```
24 |
25 | 2. Install MSDL:
26 | ```bash
27 | pip install -e .
28 | ```
29 |
30 | ## Usage
31 |
32 | After installation, you can run the MSDL command from any directory:
33 |
34 | ```bash
35 | msdl
36 | ```
37 |
38 | Follow the interactive prompts for configuration:
39 | - Choose the language for the Agent (Chinese or English; this only affects the language of prompts).
40 | - Select the model deployment type (local model or cloud model).
41 | - Choose the model format:
42 | - Currently, only `internlm_silicon` works properly for cloud models.
43 | - For local models, only `internlm_server` has passed tests and runs correctly.
44 | - Enter the necessary API keys (e.g., SILICON_API_KEY).
45 |
46 | MSDL will automatically perform the following actions:
47 | - Copy and configure the necessary Dockerfile and docker-compose.yaml files.
48 | - Build Docker images.
49 | - Launch Docker containers.
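
Once the launcher finishes, you can verify the result with ordinary Docker commands. The sketch below assumes the default container names from the bundled `docker-compose.yaml` (`mindsearch-backend` and `mindsearch-frontend`), which publish ports 8002 and 8080 respectively.

```bash
docker ps --filter "name=mindsearch"   # both containers should be listed as Up
docker logs -f mindsearch-backend      # follow backend logs if something looks wrong
```

The React frontend should then be reachable at http://localhost:8080.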
50 |
51 | ## Deployment Options Comparison
52 |
53 | ### Cloud Model Deployment (Recommended)
54 |
55 | **Advantages**:
56 | - Lightweight deployment with minimal disk usage (frontend around 510MB, backend around 839MB).
57 | - No need for high-performance hardware.
58 | - Easy to deploy and maintain.
59 | - You can freely use the `internlm/internlm2_5-7b-chat` model via SiliconCloud.
60 | - High concurrency, fast inference speed.
61 |
62 | **Instructions**:
63 | - Select the "Cloud Model" option.
64 | - Choose "internlm_silicon" as the model format.
65 | - Enter the SiliconCloud API Key (register at https://cloud.siliconflow.cn/ to obtain it).
66 |
67 | **Important Notes**:
68 | - The `internlm/internlm2_5-7b-chat` model is freely accessible on SiliconCloud.
69 | - MindSearch has no financial relationship with SiliconCloud; this service is recommended solely because it provides valuable resources to the open-source community.
70 |
71 | ### Local Model Deployment
72 |
73 | **Features**:
74 | - Uses the `openmmlab/lmdeploy` image.
75 | - Based on the PyTorch environment.
76 | - Requires significant disk space (backend container 15GB+, model 15GB+, totaling 30GB+).
77 | - Requires a powerful GPU (12GB or more of VRAM recommended).
78 |
79 | **Instructions**:
80 | - Select the "Local Model" option.
81 | - Choose "internlm_server" as the model format.
82 |
83 | **Relevant Links**:
84 | - lmdeploy image: https://hub.docker.com/r/openmmlab/lmdeploy/tags
85 | - InternLM2.5 project: https://huggingface.co/internlm/internlm2_5-7b-chat
86 |
87 | ## Notes
88 |
89 | - Currently, only the `internlm_silicon` format works properly for cloud models, and only the `internlm_server` format has passed tests for local models.
90 | - The language selection only affects the language of the Agent's prompts and does not change the language of the React frontend.
91 | - The first run might take a long time to download necessary model files and Docker images.
92 | - When using cloud models, ensure a stable network connection.
93 |
94 | ## Troubleshooting
95 |
96 | 1. Ensure the Docker service is running.
97 | 2. Check if there is sufficient disk space.
98 | 3. Ensure all necessary environment variables are set correctly.
99 | 4. Check if the network connection is stable.
100 | 5. Verify the validity of API keys (e.g., for cloud models).
101 |
102 | If problems persist, check the Issues section in the MindSearch GitHub repository or submit a new issue.
103 |
104 | ## Privacy and Security
105 |
106 | MSDL is a locally executed tool and does not transmit any API keys or sensitive information. All configuration information is stored in the `msdl/temp/.env` file, used only to simplify the deployment process.
107 |
108 | ## Updating MSDL
109 |
110 | To update MSDL to the latest version, follow these steps:
111 |
112 | 1. Navigate to the MindSearch directory.
113 | 2. Pull the latest code:
114 | ```bash
115 | git pull origin main
116 | ```
117 | 3. Reinstall MSDL:
118 | ```bash
119 | cd docker
120 | pip install -e .
121 | ```
122 |
123 | ## Conclusion
124 |
125 | If you have any questions or suggestions, feel free to submit an issue on GitHub or contact us directly. Thank you for using MindSearch and MSDL!
--------------------------------------------------------------------------------
/docker/README_zh-CN.md:
--------------------------------------------------------------------------------
1 | # MSDL (MindSearch Docker Launcher) 使用指南
2 |
3 | [English](README.md) | 简体中文
4 |
5 | ## 简介
6 |
7 | MSDL (MindSearch Docker Launcher) 是一个专为简化 MindSearch 部署过程而设计的命令行工具。它通过交互式界面帮助用户轻松配置和启动 MindSearch 的 Docker 环境,降低了部署的复杂性。MSDL 主要作为部署容器的脚手架,不涉及 MindSearch 核心逻辑的优化。
8 |
9 | ## 环境要求
10 |
11 | - Python 3.7 或更高版本
12 | - Docker (需包含 Docker Compose,新版本的 Docker 通常已集成)
13 | - Git (用于克隆仓库)
14 | - 稳定的网络连接
15 | - 充足的磁盘空间(根据选择的部署方案,所需空间有所不同)
16 |
17 | ## 安装步骤
18 |
19 | 1. 克隆 MindSearch 仓库:
20 | ```bash
21 | git clone https://github.com/InternLM/MindSearch.git # 已经克隆过的,可以忽略执行此步骤
22 | cd MindSearch/docker
23 | ```
24 |
25 | 2. 安装 MSDL:
26 | ```bash
27 | pip install -e .
28 | ```
29 |
30 | ## 使用方法
31 |
32 | 安装完成后,您可以在任意目录下运行 MSDL 命令:
33 |
34 | ```bash
35 | msdl
36 | ```
37 |
38 | 按照交互式提示进行配置:
39 | - 选择 Agent 使用的语言(中文或英文,仅影响 Agent 的提示词语言)
40 | - 选择模型部署类型(本地模型或云端模型)
41 | - 选择模型格式
42 | - 云端模型目前只有 internlm_silicon 能够正常运行
43 | - 本地模型目前只有 internlm_server 通过测试,能正常运行
44 | - 输入必要的 API 密钥(如 SILICON_API_KEY)
45 |
46 | MSDL 将自动执行以下操作:
47 | - 复制并配置必要的 Dockerfile 和 docker-compose.yaml 文件
48 | - 构建 Docker 镜像
49 | - 启动 Docker 容器
50 |
51 | ## 部署方案比较
52 |
53 | ### 云端模型部署(推荐)
54 |
55 | **优势**:
56 | - 轻量级部署,磁盘占用小(前端约 510MB,后端约 839MB)
57 | - 无需高性能硬件
58 | - 部署和维护简单
59 | - 使用 SiliconCloud 可免费调用 internlm/internlm2_5-7b-chat 模型
60 | - 高并发量,推理速度快
61 |
62 | **使用说明**:
63 | - 选择"云端模型"选项
64 | - 选择 "internlm_silicon" 作为模型格式
65 | - 输入 SiliconCloud API Key(需在 https://cloud.siliconflow.cn/ 注册获取)
66 |
67 | **重要说明**:
68 | - internlm/internlm2_5-7b-chat 模型在 SiliconCloud 上可以免费调用,但 API Key 仍需妥善保管好。
69 | - MindSearch 项目与 SiliconCloud 并无利益关系,只是使用它能更好地体验 MindSearch 的效果,感谢 SiliconCloud 为开源社区所做的贡献。
70 |
71 | ### 本地模型部署
72 |
73 | **特点**:
74 | - 使用 openmmlab/lmdeploy 镜像
75 | - 基于 PyTorch 环境
76 | - 需要大量磁盘空间(后端容器 15GB+,模型 15GB+,总计 30GB 以上)
77 | - 需要强大的 GPU(建议 12GB 或以上显存)
78 |
79 | **使用说明**:
80 | - 选择"本地模型"选项
81 | - 选择 "internlm_server" 作为模型格式
82 |
83 | **相关链接**:
84 | - lmdeploy 镜像: https://hub.docker.com/r/openmmlab/lmdeploy/tags
85 | - InternLM2.5 项目: https://huggingface.co/internlm/internlm2_5-7b-chat
86 |
87 | ## 注意事项
88 |
89 | - 云端模型目前只有 internlm_silicon 格式能够正常运行,本地模型只有 internlm_server 格式通过测试能正常运行。
90 | - 选择语言只会影响 Agent 的提示词语言,不会改变 React 前端的界面语言。
91 | - 首次运行可能需要较长时间来下载必要的模型文件和 Docker 镜像。
92 | - 使用云端模型时,请确保网络连接稳定。
93 |
94 | ## 故障排除
95 |
96 | 1. 确保 Docker 服务正在运行。
97 | 2. 检查是否有足够的磁盘空间。
98 | 3. 确保所有必要的环境变量已正确设置。
99 | 4. 检查网络连接是否正常。
100 | 5. 验证 API Key 是否有效(如使用云端模型)。
101 |
102 | 如果问题持续,请查看 MindSearch 的 GitHub 仓库中的 Issues 部分,或提交新的 Issue。
103 |
104 | ## 隐私和安全
105 |
106 | MSDL 是纯本地执行的工具,不会上报任何 API Key 或其他敏感信息。所有配置信息存储在 `msdl/temp/.env` 文件中,仅用于简化部署过程。
107 |
108 | ## 更新 MSDL
109 |
110 | 要更新 MSDL 到最新版本,请执行以下步骤:
111 |
112 | 1. 进入 MindSearch 目录
113 | 2. 拉取最新的代码:
114 | ```bash
115 | git pull origin main
116 | ```
117 | 3. 重新安装 MSDL:
118 | ```bash
119 | cd docker
120 | pip install -e .
121 | ```
122 |
123 | ## 结语
124 |
125 | 如有任何问题或建议,欢迎在 GitHub 上提交 Issue 或直接联系我们。感谢您使用 MindSearch 和 MSDL!
--------------------------------------------------------------------------------
/docker/msdl/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/InternLM/MindSearch/bfb41be176f95700bcf199be69b001096425bbbd/docker/msdl/__init__.py
--------------------------------------------------------------------------------
/docker/msdl/__main__.py:
--------------------------------------------------------------------------------
1 | # msdl/__main__.py
2 | import signal
3 | import sys
4 | import argparse
5 | import os
6 | from pathlib import Path
7 |
8 | from msdl.config import (
9 | BACKEND_DOCKERFILE_DIR,
10 | FRONTEND_DOCKERFILE_DIR,
11 | PACKAGE_DIR,
12 | PROJECT_ROOT,
13 | REACT_DOCKERFILE,
14 | TEMP_DIR,
15 | TEMPLATE_FILES,
16 | )
17 | from msdl.docker_manager import (
18 | check_docker_install,
19 | run_docker_compose,
20 | stop_and_remove_containers,
21 | update_docker_compose_paths,
22 | )
23 | from msdl.i18n import (
24 | set_language,
25 | setup_i18n, t,
26 | )
27 | from msdl.utils import (
28 | copy_templates_to_temp,
29 | copy_backend_dockerfile,
30 | copy_frontend_dockerfile,
31 | modify_docker_compose,
32 | )
33 | from msdl.user_interaction import get_user_choices
34 |
35 |
36 | def signal_handler(signum, frame):
37 | print(t("TERMINATION_SIGNAL"))
38 | stop_and_remove_containers()
39 | sys.exit(0)
40 |
41 |
42 | def parse_args():
43 | parser = argparse.ArgumentParser(description=t("CLI_DESCRIPTION"))
44 | parser.add_argument('--language', '-l',
45 | help=t("LANGUAGE_HELP"),
46 | choices=["en", "zh_CN"],
47 | default=None)
48 | parser.add_argument('--config-language', action='store_true',
49 | help=t("CONFIG_LANGUAGE_HELP"))
50 | return parser.parse_args()
51 |
52 |
53 | def main():
54 | # Setup signal handler
55 | signal.signal(signal.SIGINT, signal_handler)
56 | signal.signal(signal.SIGTERM, signal_handler)
57 |
58 | # Initialize i18n
59 | setup_i18n()
60 |
61 | # Parse command line arguments
62 | args = parse_args()
63 | if args.language:
64 | set_language(args.language)
65 | # Reinitialize i18n with the new language
66 | setup_i18n()
67 |
68 | try:
69 | # Check if TEMP_DIR exists, if not, create it
70 | if not TEMP_DIR.exists():
71 | TEMP_DIR.mkdir(parents=True, exist_ok=True)
72 | print(t("TEMP_DIR_CREATED", dir=str(TEMP_DIR)))
73 |
74 | check_docker_install()
75 |
76 | # Get user choices using the new module
77 | backend_language, model, model_format, search_engine = get_user_choices()
78 |
79 | # Copy template files
80 | copy_templates_to_temp(TEMPLATE_FILES)
81 |
82 | # Copy Dockerfiles
83 | copy_backend_dockerfile(model)
84 | copy_frontend_dockerfile()
85 |
86 | # Update paths in docker-compose.yml
87 | update_docker_compose_paths()
88 |
89 | # Modify docker-compose.yml based on user choices
90 | modify_docker_compose(model, backend_language, model_format, search_engine)
91 |
92 | stop_and_remove_containers()
93 | run_docker_compose()
94 |
95 | print(t("DOCKER_LAUNCHER_COMPLETE"))
96 | except KeyboardInterrupt:
97 | print(t("KEYBOARD_INTERRUPT"))
98 | # stop_and_remove_containers()
99 | sys.exit(0)
100 | except Exception as e:
101 | print(t("UNEXPECTED_ERROR", error=str(e)))
102 | # stop_and_remove_containers()
103 | sys.exit(1)
104 |
105 |
106 | if __name__ == "__main__":
107 | main()
108 |
--------------------------------------------------------------------------------
/docker/msdl/config.py:
--------------------------------------------------------------------------------
1 | # msdl/config.py
2 |
3 | from pathlib import Path
4 |
5 |
6 | class FileSystemManager:
7 |
8 | @staticmethod
9 | def ensure_dir(dir_path):
10 | """Ensure the directory exists, create if it doesn't"""
11 | path = Path(dir_path)
12 | if not path.exists():
13 | path.mkdir(parents=True, exist_ok=True)
14 | return path
15 |
16 | @staticmethod
17 | def ensure_file(file_path, default_content=""):
18 | """Ensure the file exists, create if it doesn't"""
19 | path = Path(file_path)
20 | if not path.parent.exists():
21 | FileSystemManager.ensure_dir(path.parent)
22 | if not path.exists():
23 | with open(path, "w") as f:
24 | f.write(default_content)
25 | return path
26 |
27 |
28 | # Get the directory where the script is located
29 | PACKAGE_DIR = Path(__file__).resolve().parent
30 |
31 | # Get the root directory of the MindSearch project
32 | PROJECT_ROOT = PACKAGE_DIR.parent.parent
33 |
34 | # Get the temp directory path, which is actually the working directory for executing the docker compose up command
35 | TEMP_DIR = FileSystemManager.ensure_dir(PACKAGE_DIR / "temp")
36 |
37 | # Configuration file name list
38 | TEMPLATE_FILES = ["docker-compose.yaml"]
39 |
40 | # Backend Dockerfile directory
41 | BACKEND_DOCKERFILE_DIR = "backend"
42 |
43 | # Backend Dockerfile name
44 | CLOUD_LLM_DOCKERFILE = "cloud_llm.dockerfile"
45 | LOCAL_LLM_DOCKERFILE = "local_llm.dockerfile"
46 |
47 | # Frontend Dockerfile directory
48 | FRONTEND_DOCKERFILE_DIR = "frontend"
49 |
50 | # Frontend Dockerfile name
51 | REACT_DOCKERFILE = "react.dockerfile"
52 |
53 | # i18n translations directory
54 | TRANSLATIONS_DIR = FileSystemManager.ensure_dir(PACKAGE_DIR / "translations")
55 |
56 | # Get the path of the .env file
57 | ENV_FILE_PATH = FileSystemManager.ensure_file(TEMP_DIR / ".env")
58 |
--------------------------------------------------------------------------------
/docker/msdl/docker_manager.py:
--------------------------------------------------------------------------------
1 | # msdl/docker_manager.py
2 |
3 | import os
4 | import subprocess
5 | import sys
6 | from functools import lru_cache
7 |
8 | import yaml
9 | from msdl.config import PROJECT_ROOT, TEMP_DIR
10 | from msdl.i18n import t
11 |
12 |
13 | @lru_cache(maxsize=1)
14 | def get_docker_command():
15 | try:
16 | subprocess.run(
17 | ["docker", "compose", "version"], check=True, capture_output=True
18 | )
19 | return ["docker", "compose"]
20 | except subprocess.CalledProcessError:
21 | try:
22 | subprocess.run(
23 | ["docker-compose", "--version"], check=True, capture_output=True
24 | )
25 | return ["docker-compose"]
26 | except subprocess.CalledProcessError:
27 | print(t("DOCKER_COMPOSE_NOT_FOUND"))
28 | sys.exit(1)
29 |
30 |
31 | @lru_cache(maxsize=1)
32 | def check_docker_install():
33 | try:
34 | subprocess.run(["docker", "--version"], check=True, capture_output=True)
35 | docker_compose_cmd = get_docker_command()
36 | subprocess.run(
37 | docker_compose_cmd + ["version"], check=True, capture_output=True
38 | )
39 | print(t("DOCKER_INSTALLED"))
40 | return True
41 | except subprocess.CalledProcessError as e:
42 | print(t("DOCKER_INSTALL_ERROR", error=str(e)))
43 | return False
44 | except FileNotFoundError:
45 | print(t("DOCKER_NOT_FOUND"))
46 | return False
47 |
48 |
49 | def stop_and_remove_containers():
50 | docker_compose_cmd = get_docker_command()
51 | compose_file = os.path.join(TEMP_DIR, "docker-compose.yaml")
52 |
53 | # Read the docker-compose.yaml file
54 | try:
55 | with open(compose_file, "r") as file:
56 | compose_config = yaml.safe_load(file)
57 | except Exception as e:
58 | print(t("COMPOSE_FILE_READ_ERROR", error=str(e)))
59 | return
60 |
61 | # Get project name and service names
62 | project_name = compose_config.get("name", "mindsearch")
63 | service_names = list(compose_config.get("services", {}).keys())
64 |
65 | # Use only the project name as the container prefix
66 | container_prefix = f"{project_name}_"
67 |
68 | try:
69 | # 1. Try to stop containers using the current docker-compose.yaml
70 | subprocess.run(
71 | docker_compose_cmd + ["-f", compose_file, "down", "-v", "--remove-orphans"],
72 | check=True,
73 | )
74 | except subprocess.CalledProcessError:
75 | print(t("CURRENT_COMPOSE_STOP_FAILED"))
76 |
77 | # 2. Attempt to clean up potentially existing containers, regardless of the success of the previous step
78 | try:
79 | # List all containers (including stopped ones)
80 | result = subprocess.run(
81 | ["docker", "ps", "-a", "--format", "{{.Names}}"],
82 | check=True,
83 | capture_output=True,
84 | text=True,
85 | )
86 | all_containers = result.stdout.splitlines()
87 |
88 | # 3. Filter out containers belonging to our project
89 | project_containers = [
90 | c
91 | for c in all_containers
92 | if c.startswith(container_prefix)
93 | or any(c == f"{project_name}-{service}" for service in service_names)
94 | ]
95 |
96 | if project_containers:
97 | # 4. Force stop and remove these containers
98 | for container in project_containers:
99 | try:
100 | subprocess.run(["docker", "stop", container], check=True)
101 | subprocess.run(["docker", "rm", "-f", container], check=True)
102 | print(t("CONTAINER_STOPPED_AND_REMOVED", container=container))
103 | except subprocess.CalledProcessError as e:
104 | print(t("CONTAINER_STOP_ERROR", container=container, error=str(e)))
105 |
106 | # 5. Clean up potentially leftover networks
107 | try:
108 | subprocess.run(["docker", "network", "prune", "-f"], check=True)
109 | print(t("NETWORKS_PRUNED"))
110 | except subprocess.CalledProcessError as e:
111 | print(t("NETWORK_PRUNE_ERROR", error=str(e)))
112 |
113 | except subprocess.CalledProcessError as e:
114 | print(t("DOCKER_LIST_ERROR", error=str(e)))
115 |
116 | print(t("CONTAINERS_STOPPED_AND_REMOVED"))
117 |
118 |
119 | def run_docker_compose():
120 | docker_compose_cmd = get_docker_command()
121 | try:
122 | print(t("STARTING_CONTAINERS_WITH_BUILD"))
123 | subprocess.run(
124 | docker_compose_cmd
125 | + [
126 | "-f",
127 | os.path.join(TEMP_DIR, "docker-compose.yaml"),
128 | "--env-file",
129 | os.path.join(TEMP_DIR, ".env"),
130 | "up",
131 | "-d",
132 | "--build",
133 | ],
134 | check=True,
135 | )
136 | print(t("CONTAINERS_STARTED"))
137 | except subprocess.CalledProcessError as e:
138 | print(t("DOCKER_ERROR", error=str(e)))
139 | print(t("DOCKER_OUTPUT"))
140 | print(e.output.decode() if e.output else "No output")
141 | stop_and_remove_containers()
142 | sys.exit(1)
143 |
144 |
145 | def update_docker_compose_paths(project_root=PROJECT_ROOT):
146 | docker_compose_path = os.path.join(TEMP_DIR, "docker-compose.yaml")
147 | with open(docker_compose_path, "r") as file:
148 | compose_data = yaml.safe_load(file)
149 | for service in compose_data["services"].values():
150 | if "build" in service:
151 | if "context" in service["build"]:
152 | if service["build"]["context"] == "..":
153 | service["build"]["context"] = project_root
154 | else:
155 | service["build"]["context"] = os.path.join(
156 | project_root, service["build"]["context"]
157 | )
158 | if "dockerfile" in service["build"]:
159 | dockerfile_name = os.path.basename(service["build"]["dockerfile"])
160 | service["build"]["dockerfile"] = os.path.join(TEMP_DIR, dockerfile_name)
161 | with open(docker_compose_path, "w") as file:
162 | yaml.dump(compose_data, file)
163 | print(t("PATHS_UPDATED"))
164 |
165 |
166 | def main():
167 | if check_docker_install():
168 | update_docker_compose_paths()
169 | run_docker_compose()
170 | else:
171 | sys.exit(1)
172 |
173 |
174 | if __name__ == "__main__":
175 | main()
176 |
--------------------------------------------------------------------------------
/docker/msdl/i18n.py:
--------------------------------------------------------------------------------
1 | # msdl/i18n.py
2 |
3 | import os
4 | import i18n
5 | import locale
6 | from dotenv import load_dotenv, set_key, find_dotenv
7 | from msdl.config import TRANSLATIONS_DIR, ENV_FILE_PATH
8 | from pathlib import Path
9 |
10 | # Load environment variables at module level
11 | load_dotenv(ENV_FILE_PATH)
12 |
13 | def get_env_variable(var_name, default=None):
14 | return os.getenv(var_name, default)
15 |
16 | def set_env_variable(var_name, value):
17 | dotenv_file = find_dotenv(ENV_FILE_PATH)
18 | set_key(dotenv_file, var_name, value)
19 | # Reload environment variables after setting
20 | os.environ[var_name] = value
21 |
22 | def get_system_language():
23 | try:
24 | return locale.getlocale()[0].split("_")[0]
25 | except Exception:
26 | return "en"
27 |
28 | def get_available_languages():
29 | """Get list of available language codes from translation files"""
30 | translations_path = Path(TRANSLATIONS_DIR)
31 | if not translations_path.exists():
32 | return ["en"]
33 | return [f.stem for f in translations_path.glob("*.yaml")]
34 |
35 | def set_language(language_code):
36 | """Set the interaction language and persist it to .env file"""
37 | available_langs = get_available_languages()
38 | if language_code not in available_langs:
39 | print(f"Warning: Language '{language_code}' not available. Using 'en' instead.")
40 | language_code = "en"
41 |
42 | set_env_variable("LAUNCHER_INTERACTION_LANGUAGE", language_code)
43 | i18n.set("locale", language_code)
44 |
45 |
46 | def setup_i18n():
47 | # Initialize i18n settings
48 | i18n.load_path.append(TRANSLATIONS_DIR)
49 | i18n.set("filename_format", "{locale}.{format}")
50 | i18n.set("file_format", "yaml")
51 |
52 | # Get language from environment
53 | env_language = get_env_variable("LAUNCHER_INTERACTION_LANGUAGE")
54 | if not env_language:
55 | # If no language is set, use English as default without saving to .env
56 | env_language = "en"
57 |
58 | # Force reload translations
59 | i18n.set("locale", None) # Clear current locale
60 | i18n.set("locale", env_language) # Set new locale
61 |
62 |
63 | def t(key, **kwargs):
64 | return i18n.t(key, **kwargs)
65 |
--------------------------------------------------------------------------------
/docker/msdl/templates/backend/cloud_llm.dockerfile:
--------------------------------------------------------------------------------
1 | # Use Python 3.11.9 as the base image
2 | FROM python:3.11.9-slim
3 |
4 | # Set the working directory
5 | WORKDIR /root
6 |
7 | # Install Git
8 | RUN apt-get update && apt-get install -y git && apt-get clean && rm -rf /var/lib/apt/lists/*
9 |
10 | # Install specified dependency packages
11 | RUN pip install --no-cache-dir \
12 | duckduckgo_search==5.3.1b1 \
13 | einops \
14 | fastapi \
15 | janus \
16 | pyvis \
17 | sse-starlette \
18 | termcolor \
19 | uvicorn \
20 | griffe==0.48.0 \
21 | python-dotenv \
22 | lagent==0.5.0rc1
23 |
24 | # Copy the mindsearch folder to the /root directory of the container
25 | COPY mindsearch /root/mindsearch
--------------------------------------------------------------------------------
/docker/msdl/templates/backend/local_llm.dockerfile:
--------------------------------------------------------------------------------
1 | # Use openmmlab/lmdeploy:latest-cu12 as the base image
2 | # Note: Before using this Dockerfile, you should visit https://hub.docker.com/r/openmmlab/lmdeploy/tags
3 | # to select a base image that's compatible with your specific GPU architecture.
4 | # The 'latest-cu12' tag is used here as an example, but you should choose the most
5 | # appropriate tag for your setup (e.g., cu11 for CUDA 11, cu12 for CUDA 12, etc.)
6 | FROM openmmlab/lmdeploy:latest-cu12
7 |
8 | # Set the working directory
9 | WORKDIR /root
10 |
11 | # Install Git
12 | RUN apt-get update && apt-get install -y git && apt-get clean && rm -rf /var/lib/apt/lists/*
13 |
14 | # Install specified dependency packages
15 | # Note: lmdeploy dependency is already included in the base image, no need to reinstall
16 | RUN pip install --no-cache-dir \
17 | duckduckgo_search==5.3.1b1 \
18 | einops \
19 | fastapi \
20 | janus \
21 | pyvis \
22 | sse-starlette \
23 | termcolor \
24 | uvicorn \
25 | griffe==0.48.0 \
26 | python-dotenv \
27 | lagent==0.5.0rc1
28 |
29 | # Copy the mindsearch folder to the /root directory of the container
30 | COPY mindsearch /root/mindsearch
--------------------------------------------------------------------------------
/docker/msdl/templates/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | services:
2 | backend:
3 | container_name: mindsearch-backend
4 | build:
5 | context: .
6 | dockerfile: backend.dockerfile
7 | image: mindsearch/backend:latest
8 | restart: unless-stopped
9 | # Uncomment the following line to force using local build
10 | # pull: never
11 | ports:
12 | - "8002:8002"
13 | environment:
14 | - PYTHONUNBUFFERED=1
15 | # - OPENAI_API_KEY=${OPENAI_API_KEY:-}
16 | - OPENAI_API_BASE=${OPENAI_API_BASE:-https://api.openai.com/v1}
17 | # - QWEN_API_KEY=${QWEN_API_KEY:-}
18 | # - SILICON_API_KEY=${SILICON_API_KEY:-}
19 | command: python -m mindsearch.app --lang ${LANG:-cn} --model_format ${MODEL_FORMAT:-internlm_server}
20 | volumes:
21 | - /root/.cache:/root/.cache
22 | deploy:
23 | resources:
24 | reservations:
25 | devices:
26 | - driver: nvidia
27 | count: 1
28 | capabilities: [gpu]
29 | # GPU support explanation:
30 | # The current configuration has been tested with NVIDIA GPUs. If you use other types of GPUs, you may need to adjust the configuration.
31 | # For AMD GPUs, you can try using the ROCm driver by modifying the configuration as follows:
32 | # deploy:
33 | # resources:
34 | # reservations:
35 | # devices:
36 | # - driver: amd
37 | # count: 1
38 | # capabilities: [gpu]
39 | #
40 | # For other GPU types, you may need to consult the respective Docker GPU support documentation.
41 | # In theory, any GPU supported by PyTorch should be configurable here.
42 | # If you encounter issues, try the following steps:
43 | # 1. Ensure the correct GPU drivers are installed on the host
44 | # 2. Check if your Docker version supports your GPU type
45 | # 3. Install necessary GPU-related libraries in the Dockerfile
46 | # 4. Adjust the deploy configuration here to match your GPU type
47 | #
48 | # Note: After changing GPU configuration, you may need to rebuild the image.
49 |
50 | frontend:
51 | container_name: mindsearch-frontend
52 | build:
53 | context: .
54 | dockerfile: frontend.dockerfile
55 | image: mindsearch/frontend:latest
56 | restart: unless-stopped
57 | # Uncomment the following line to force using local build
58 | # pull: never
59 | ports:
60 | - "8080:8080"
61 | depends_on:
62 | - backend
63 |
--------------------------------------------------------------------------------
/docker/msdl/templates/frontend/react.dockerfile:
--------------------------------------------------------------------------------
1 | # Use Node.js 18 as the base image
2 | FROM node:18-alpine
3 |
4 | # Set the working directory
5 | WORKDIR /app
6 |
7 | # Copy package files first to leverage Docker cache
8 | COPY frontend/React/package*.json ./
9 |
10 | # Install dependencies
11 | RUN npm install
12 |
13 | # Copy source code after npm install to prevent unnecessary reinstalls
14 | COPY frontend/React/ ./
15 |
16 | # Modify vite.config.ts for Docker environment
17 | # Because we use Docker Compose to manage the backend and frontend services, we can use the service name as the hostname
18 | RUN sed -i '/server: {/,/},/c\
19 | server: {\
20 | host: "0.0.0.0",\
21 | port: 8080,\
22 | proxy: {\
23 | "/solve": {\
24 | target: "http://backend:8002",\
25 | changeOrigin: true,\
26 | },\
27 | // "/solve": {\
28 | // target: "https://mindsearch.openxlab.org.cn",\
29 | // changeOrigin: true,\
30 | // },\
31 | },\
32 | },' vite.config.ts
33 |
34 | # Start the development server
35 | CMD ["npm", "start"]
--------------------------------------------------------------------------------
/docker/msdl/translations/en.yaml:
--------------------------------------------------------------------------------
1 | en:
2 | SCRIPT_DIR: "Script directory: %{dir}"
3 | PROJECT_ROOT: "Project root directory: %{dir}"
4 | TEMP_DIR: "Temporary directory: %{dir}"
5 | DOCKER_LAUNCHER_START: "Starting Docker launcher process"
6 | DOCKER_LAUNCHER_COMPLETE: "Docker launcher process completed"
7 | DIR_CREATED: "Directory created: %{dir}"
8 | FILE_COPIED: "Copied %{file} to the temp directory"
9 | FILE_NOT_FOUND: "Error: %{file} not found in the templates directory"
10 | CONTAINERS_STOPPED: "Existing containers and volumes stopped and removed"
11 | CONTAINER_STOP_ERROR: "Error stopping and removing containers (this may be normal if there were no running containers): %{error}"
12 | BUILDING_IMAGES: "Starting to build Docker images..."
13 | IMAGES_BUILT: "Docker images built successfully"
14 | STARTING_CONTAINERS: "Starting Docker containers..."
15 | STARTING_CONTAINERS_WITH_BUILD: "Starting to build and start Docker containers..."
16 | CONTAINERS_STARTED: "Docker containers started successfully"
17 | DOCKER_ERROR: "Error while building or starting Docker containers: %{error}"
18 | DOCKER_OUTPUT: "Docker command output:"
19 | DOCKER_INSTALLED: "Docker and Docker Compose installed correctly"
20 | DOCKER_INSTALL_ERROR: "Error: Docker or Docker Compose may not be installed correctly: %{error}"
21 | DOCKER_NOT_FOUND: "Error: Docker or Docker Compose command not found. Please ensure they are correctly installed and added to the PATH."
22 | DOCKER_COMPOSE_NOT_FOUND: "Error: Docker Compose command not found. Please ensure it is correctly installed and added to the PATH."
23 | PATHS_UPDATED: "Paths updated in docker-compose.yaml"
24 | COMPOSE_FILE_CONTENT: "docker-compose.yaml file content:"
25 | COMPOSE_FILE_NOT_FOUND: "Error: %{file} file not found"
26 | COMPOSE_FILE_READ_ERROR: "Error reading docker-compose.yaml file: %{error}"
27 | TERMINATION_SIGNAL: "Termination signal caught. Exiting gracefully..."
28 | KEYBOARD_INTERRUPT: "Keyboard interrupt caught. Exiting gracefully..."
29 | UNEXPECTED_ERROR: "An unexpected error occurred: %{error}"
30 | BACKEND_LANGUAGE_CHOICE: "Select MindSearch backend language (default is cn)"
31 | CHINESE: "Chinese (cn)"
32 | ENGLISH: "English (en)"
33 | MODEL_DEPLOYMENT_TYPE: "Select model deployment type:"
34 | CLOUD_MODEL: "Cloud model"
35 | LOCAL_MODEL: "Local model"
36 | MODEL_FORMAT_CHOICE: "Select model format:"
37 | CONFIRM_USE_EXISTING_API_KEY: "Do you want to use the existing %{ENV_VAR_NAME} API key?"
38 | CONFIRM_OVERWRITE_EXISTING_API_KEY: "Do you want to overwrite the existing %{ENV_VAR_NAME} API key?"
39 | PLEASE_INPUT_NEW_API_KEY: "Please enter a new %{ENV_VAR_NAME} API key:"
40 | PLEASE_INPUT_NEW_API_KEY_FROM_ZERO: "Please enter a new %{ENV_VAR_NAME} API key:"
41 | INVALID_API_KEY_FORMAT: "Invalid API key format"
42 | RETRY_API_KEY_INPUT: "Retry API key input"
43 | API_KEY_INPUT_CANCELLED: "API key input cancelled"
44 | UNKNOWN_API_KEY_TYPE: "Unknown API key type: %{KEY_TYPE}"
45 | UNKNOWN_MODEL_FORMAT: "Unknown model format: %{MODEL_FORMAT}"
46 | INVALID_API_KEY: "Invalid API key: %{KEY_TYPE}"
47 | API_KEY_SAVED: "API key for %{ENV_VAR_NAME} saved"
48 | UNKNOWN_DOCKERFILE: "Unknown Dockerfile: %{dockerfile}"
49 | UNKNOWN_MODEL_TYPE: "Unknown model type: %{model_type}"
50 | BACKEND_DOCKERFILE_COPIED: "Backend Dockerfile copied from %{source_path} to %{dest_path}"
51 | FRONTEND_DOCKERFILE_COPIED: "Frontend Dockerfile copied from %{source_path} to %{dest_path}"
52 | TEMP_DIR_CREATED: "Temporary directory created at %{dir}"
53 | CURRENT_COMPOSE_STOP_FAILED: "Failed to stop containers with the current docker-compose.yaml"
54 | CONTAINER_STOPPED_AND_REMOVED: "Container %{container} stopped and removed"
55 | NETWORKS_PRUNED: "Corresponding Docker networks pruned"
56 | NETWORK_PRUNE_ERROR: "Error pruning corresponding Docker networks: %{error}"
57 | DOCKER_LIST_ERROR: "Error listing Docker containers: %{error}"
58 | CONTAINERS_STOPPED_AND_REMOVED: "Docker containers stopped and removed"
59 | CLI_DESCRIPTION: "MindSearch Docker Launcher - A tool to manage MindSearch docker containers"
60 | LANGUAGE_HELP: "Set the msdl tool interface language (e.g. en, zh_CN)"
61 | CONFIG_LANGUAGE_HELP: "Show language configuration prompt"
62 | LANGUAGE_NOT_AVAILABLE: "Warning: Language '%{lang}' not available. Using English instead."
63 | SELECT_INTERFACE_LANGUAGE: "Select msdl tool interface language"
64 | SELECT_BACKEND_LANGUAGE: "Select MindSearch backend language (default is cn)"
65 | LANGUAGE_CHANGED_RESTARTING: "Language changed, restarting msdl..."
66 | SELECT_SEARCH_ENGINE: "Select search engine:"
67 | NO_API_KEY_NEEDED: "No API key needed"
68 | API_KEY_REQUIRED: "API key required"
69 | SEARCH_ENGINE_GOOGLE: "Google Search"
70 | SEARCH_ENGINE_BING: "Bing Search"
71 | SEARCH_ENGINE_DUCKDUCKGO: "DuckDuckGo Search"
72 | SEARCH_ENGINE_BRAVE: "Brave Search"
73 | SEARCH_ENGINE_TENCENT: "Tencent Search"
74 | TENCENT_ID_REQUIRED: "Please enter your Tencent Search Secret ID"
75 | TENCENT_KEY_REQUIRED: "Please enter your Tencent Search Secret Key"
76 | WEB_SEARCH_KEY_REQUIRED: "Please enter your Web Search API Key"
77 | SEARCH_ENGINE_CONFIGURED: "Search engine %{engine} configured successfully"
78 |
--------------------------------------------------------------------------------
/docker/msdl/translations/zh_CN.yaml:
--------------------------------------------------------------------------------
1 | zh_CN:
2 | SCRIPT_DIR: "脚本目录:%{dir}"
3 | PROJECT_ROOT: "项目根目录:%{dir}"
4 | TEMP_DIR: "临时目录:%{dir}"
5 | DOCKER_LAUNCHER_START: "开始 Docker 启动器流程"
6 | DOCKER_LAUNCHER_COMPLETE: "Docker 启动器流程完成"
7 | DIR_CREATED: "创建目录:%{dir}"
8 | FILE_COPIED: "已复制 %{file} 到 temp 目录"
9 | FILE_NOT_FOUND: "错误:%{file} 在 templates 目录中不存在"
10 | CONTAINERS_STOPPED: "已停止并删除现有容器和卷"
11 | CONTAINER_STOP_ERROR: "停止和删除容器时出错(这可能是正常的,如果没有正在运行的容器):%{error}"
12 | BUILDING_IMAGES: "开始构建Docker镜像..."
13 | IMAGES_BUILT: "Docker镜像构建成功"
14 | STARTING_CONTAINERS: "开始启动Docker容器..."
15 | STARTING_CONTAINERS_WITH_BUILD: "开始构建并启动Docker容器..."
16 | CONTAINERS_STARTED: "Docker 容器已成功启动"
17 | DOCKER_ERROR: "构建或启动 Docker 容器时出错:%{error}"
18 | DOCKER_OUTPUT: "Docker 命令输出:"
19 | DOCKER_INSTALLED: "Docker 和 Docker Compose 安装正确"
20 | DOCKER_INSTALL_ERROR: "错误:Docker 或 Docker Compose 可能没有正确安装:%{error}"
21 | DOCKER_NOT_FOUND: "错误:Docker 或 Docker Compose 命令未找到。请确保它们已正确安装并添加到PATH中。"
22 | DOCKER_COMPOSE_NOT_FOUND: "错误:Docker Compose 命令未找到。请确保它已正确安装并添加到PATH中。"
23 | PATHS_UPDATED: "已更新 docker-compose.yaml 中的路径"
24 | COMPOSE_FILE_CONTENT: "docker-compose.yaml 文件内容:"
25 | COMPOSE_FILE_NOT_FOUND: "错误:%{file} 文件不存在"
26 | COMPOSE_FILE_READ_ERROR: "读取 docker-compose.yaml 文件时出错:%{error}"
27 | TERMINATION_SIGNAL: "捕获到终止信号。正在优雅地退出..."
28 | KEYBOARD_INTERRUPT: "捕获到键盘中断。正在优雅地退出..."
29 | UNEXPECTED_ERROR: "发生未预期的错误:%{error}"
30 | BACKEND_LANGUAGE_CHOICE: "选择 MindSearch 后端语言(默认为中文)"
31 | SELECT_INTERFACE_LANGUAGE: "选择 msdl 工具界面语言"
32 | SELECT_BACKEND_LANGUAGE: "选择 MindSearch 后端语言(默认为中文)"
33 | CHINESE: "中文 (cn)"
34 | ENGLISH: "英文 (en)"
35 | MODEL_DEPLOYMENT_TYPE: "选择模型部署类型:"
36 | CLOUD_MODEL: "云端模型"
37 | LOCAL_MODEL: "本地模型"
38 | MODEL_FORMAT_CHOICE: "选择模型格式:"
39 | CONFIRM_USE_EXISTING_API_KEY: "是否使用现有的 %{ENV_VAR_NAME} API 密钥?"
40 | CONFIRM_OVERWRITE_EXISTING_API_KEY: "是否覆盖现有的 %{ENV_VAR_NAME} API 密钥?"
41 | PLEASE_INPUT_NEW_API_KEY: "请输入新的 %{ENV_VAR_NAME} API 密钥:"
42 | PLEASE_INPUT_NEW_API_KEY_FROM_ZERO: "请输入新的 %{ENV_VAR_NAME} API 密钥:"
43 | INVALID_API_KEY_FORMAT: "无效的 API 密钥格式"
44 | RETRY_API_KEY_INPUT: "重试 API 密钥输入"
45 | API_KEY_INPUT_CANCELLED: "API 密钥输入已取消"
46 | UNKNOWN_API_KEY_TYPE: "未知的 API 密钥类型:%{KEY_TYPE}"
47 | UNKNOWN_MODEL_FORMAT: "未知的模型格式:%{MODEL_FORMAT}"
48 | INVALID_API_KEY: "无效的 API 密钥:%{KEY_TYPE}"
49 | API_KEY_SAVED: "%{ENV_VAR_NAME} 的 API 密钥已保存"
50 | UNKNOWN_DOCKERFILE: "未知的 Dockerfile:%{dockerfile}"
51 | UNKNOWN_MODEL_TYPE: "未知的模型类型:%{model_type}"
52 | BACKEND_DOCKERFILE_COPIED: "后端 Dockerfile 已经从 %{source_path} 复制为 %{dest_path}"
53 | FRONTEND_DOCKERFILE_COPIED: "前端 Dockerfile 已经从 %{source_path} 复制为 %{dest_path}"
54 | TEMP_DIR_CREATED: "已在 %{dir} 创建临时目录"
55 | CURRENT_COMPOSE_STOP_FAILED: "当前的容器停止失败"
56 | CONTAINER_STOPPED_AND_REMOVED: "容器已停止并删除"
57 | NETWORKS_PRUNED: "已清理对应的Docker网络"
58 | NETWORK_PRUNE_ERROR: "清理对应的Docker网络时出错:%{error}"
59 | DOCKER_LIST_ERROR: "列出 Docker 容器时出错:%{error}"
60 | CONTAINERS_STOPPED_AND_REMOVED: "已停止并删除 Docker 容器"
61 | CLI_DESCRIPTION: "MindSearch Docker 启动器 - 用于管理 MindSearch docker 容器的工具"
62 | LANGUAGE_HELP: "设置 msdl 工具界面语言(例如:en, zh_CN)"
63 | CONFIG_LANGUAGE_HELP: "显示语言配置提示"
64 | LANGUAGE_NOT_AVAILABLE: "警告:语言'%{lang}'不可用。使用英语作为替代。"
65 | LANGUAGE_CHANGED_RESTARTING: "语言已更改,正在重启 msdl..."
66 | SELECT_SEARCH_ENGINE: "选择搜索引擎:"
67 | NO_API_KEY_NEEDED: "无需 API 密钥"
68 | API_KEY_REQUIRED: "需要 API 密钥"
69 | SEARCH_ENGINE_DUCKDUCKGO: "DuckDuckGo 搜索"
70 | SEARCH_ENGINE_BING: "必应搜索"
71 | SEARCH_ENGINE_BRAVE: "Brave 搜索"
72 | SEARCH_ENGINE_GOOGLE: "Google 搜索"
73 | SEARCH_ENGINE_TENCENT: "腾讯搜索"
74 | TENCENT_ID_REQUIRED: "请输入您的腾讯搜索 Secret ID"
75 | TENCENT_KEY_REQUIRED: "请输入您的腾讯搜索 Secret Key"
76 | WEB_SEARCH_KEY_REQUIRED: "请输入您的网页搜索 API 密钥"
77 | SEARCH_ENGINE_CONFIGURED: "搜索引擎 %{engine} 配置成功"
78 |
--------------------------------------------------------------------------------
/docker/msdl/user_interaction.py:
--------------------------------------------------------------------------------
1 | from InquirerPy import inquirer
2 | import sys
3 | import os
4 | from pathlib import Path
5 |
6 | from msdl.config import (
7 | CLOUD_LLM_DOCKERFILE,
8 | LOCAL_LLM_DOCKERFILE,
9 | )
10 | from msdl.i18n import (
11 | t,
12 | get_available_languages,
13 | set_language,
14 | get_env_variable,
15 | )
16 | from msdl.utils import (
17 | clean_api_key,
18 | get_model_formats,
19 | get_existing_api_key,
20 | save_api_key_to_env,
21 | validate_api_key,
22 | )
23 |
24 | SEARCH_ENGINES = {
25 | "DuckDuckGoSearch": {
26 | "name": "DuckDuckGo",
27 | "key": "DUCKDUCKGO",
28 | "requires_key": False,
29 | "env_var": None
30 | },
31 | "BingSearch": {
32 | "name": "Bing",
33 | "key": "BING",
34 | "requires_key": True,
35 | "env_var": "BING_SEARCH_API_KEY"
36 | },
37 | "BraveSearch": {
38 | "name": "Brave",
39 | "key": "BRAVE",
40 | "requires_key": True,
41 | "env_var": "BRAVE_SEARCH_API_KEY"
42 | },
43 | "GoogleSearch": {
44 | "name": "Google Serper",
45 | "key": "GOOGLE",
46 | "requires_key": True,
47 | "env_var": "GOOGLE_SERPER_API_KEY"
48 | },
49 | "TencentSearch": {
50 | "name": "Tencent",
51 | "key": "TENCENT",
52 | "requires_key": True,
53 | "env_vars": ["TENCENT_SEARCH_SECRET_ID", "TENCENT_SEARCH_SECRET_KEY"]
54 | }
55 | }
56 |
57 | def get_language_choice():
58 | """Get user's language preference"""
59 | def _get_language_options():
60 | available_langs = get_available_languages()
61 | lang_choices = {
62 | "en": "English",
63 | "zh_CN": "中文"
64 | }
65 | return [{"name": f"{lang_choices.get(lang, lang)}", "value": lang} for lang in available_langs]
66 |
67 | current_lang = get_env_variable("LAUNCHER_INTERACTION_LANGUAGE")
68 | if not current_lang:
69 | lang_options = _get_language_options()
70 | language = inquirer.select(
71 | message=t("SELECT_INTERFACE_LANGUAGE"),
72 | choices=lang_options,
73 | default="en"
74 | ).execute()
75 |
76 | if language:
77 | set_language(language)
78 | sys.stdout.flush()
79 | restart_program()
80 |
81 | def get_backend_language():
82 | """Get user's backend language preference"""
83 | return inquirer.select(
84 | message=t("SELECT_BACKEND_LANGUAGE"),
85 | choices=[
86 | {"name": t("CHINESE"), "value": "cn"},
87 | {"name": t("ENGLISH"), "value": "en"},
88 | ],
89 | default="cn",
90 | ).execute()
91 |
92 | def get_model_choice():
93 | """Get user's model deployment type preference"""
94 | model_deployment_type = [
95 | {
96 | "name": t("CLOUD_MODEL"),
97 | "value": CLOUD_LLM_DOCKERFILE
98 | },
99 | {
100 | "name": t("LOCAL_MODEL"),
101 | "value": LOCAL_LLM_DOCKERFILE
102 | },
103 | ]
104 |
105 | return inquirer.select(
106 | message=t("MODEL_DEPLOYMENT_TYPE"),
107 | choices=model_deployment_type,
108 | ).execute()
109 |
110 | def get_model_format(model):
111 | """Get user's model format preference"""
112 | model_formats = get_model_formats(model)
113 | return inquirer.select(
114 | message=t("MODEL_FORMAT_CHOICE"),
115 | choices=[{
116 | "name": format,
117 | "value": format
118 | } for format in model_formats],
119 | ).execute()
120 |
121 | def _handle_api_key_input(env_var_name, message=None):
122 | """Handle API key input and validation for a given environment variable"""
123 | if message is None:
124 | message = t("PLEASE_INPUT_NEW_API_KEY", ENV_VAR_NAME=env_var_name)
125 | print(message)
126 |
127 | while True:
128 | api_key = inquirer.secret(
129 | message=t("PLEASE_INPUT_NEW_API_KEY_FROM_ZERO", ENV_VAR_NAME=env_var_name)
130 | ).execute()
131 | cleaned_api_key = clean_api_key(api_key)
132 |
133 | try:
134 | save_api_key_to_env(env_var_name, cleaned_api_key, t)
135 | break
136 | except ValueError as e:
137 | print(str(e))
138 | retry = inquirer.confirm(
139 | message=t("RETRY_API_KEY_INPUT"), default=True
140 | ).execute()
141 | if not retry:
142 | print(t("API_KEY_INPUT_CANCELLED"))
143 | sys.exit(1)
144 |
145 | def handle_api_key_input(model, model_format):
146 | """Handle API key input and validation"""
147 | if model != CLOUD_LLM_DOCKERFILE:
148 | return
149 |
150 | env_var_name = {
151 | "internlm_silicon": "SILICON_API_KEY",
152 | "gpt4": "OPENAI_API_KEY",
153 | "qwen": "QWEN_API_KEY",
154 | }.get(model_format)
155 |
156 | existing_api_key = get_existing_api_key(env_var_name)
157 |
158 | if existing_api_key:
159 | use_existing = inquirer.confirm(
160 | message=t("CONFIRM_USE_EXISTING_API_KEY", ENV_VAR_NAME=env_var_name),
161 | default=True,
162 | ).execute()
163 |
164 | if use_existing:
165 | return
166 |
167 | print(t("CONFIRM_OVERWRITE_EXISTING_API_KEY", ENV_VAR_NAME=env_var_name))
168 |
169 |     # Delegate to the shared helper, which validates, saves, and actually retries on failure
170 |     _handle_api_key_input(env_var_name)
181 |
182 | def get_search_engine():
183 | """Get user's preferred search engine and handle API key if needed"""
184 | search_engine = inquirer.select(
185 | message=t("SELECT_SEARCH_ENGINE"),
186 | choices=[{
187 |             "name": f"{t('SEARCH_ENGINE_' + info['key'])} ({t('NO_API_KEY_NEEDED') if not info['requires_key'] else t('API_KEY_REQUIRED')})",
188 | "value": engine
189 | } for engine, info in SEARCH_ENGINES.items()],
190 | ).execute()
191 |
192 | engine_info = SEARCH_ENGINES[search_engine]
193 |
194 | if engine_info['requires_key']:
195 | if search_engine == "TencentSearch":
196 | # Handle Tencent's special case with two keys
197 | for env_var in engine_info['env_vars']:
198 | is_id = "ID" in env_var
199 | message = t("TENCENT_ID_REQUIRED") if is_id else t("TENCENT_KEY_REQUIRED")
200 | existing_key = get_existing_api_key(env_var)
201 | if existing_key:
202 | use_existing = inquirer.confirm(
203 | message=t("CONFIRM_USE_EXISTING_API_KEY", ENV_VAR_NAME=env_var),
204 | default=True,
205 | ).execute()
206 | if not use_existing:
207 | _handle_api_key_input(env_var, message)
208 | else:
209 | _handle_api_key_input(env_var, message)
210 | else:
211 | # Handle standard case with single WEB_SEARCH_API_KEY
212 | env_var = engine_info['env_var']
213 | existing_key = get_existing_api_key(env_var)
214 | if existing_key:
215 | use_existing = inquirer.confirm(
216 | message=t("CONFIRM_USE_EXISTING_API_KEY", ENV_VAR_NAME=env_var),
217 | default=True,
218 | ).execute()
219 | if not use_existing:
220 | _handle_api_key_input(env_var, t("WEB_SEARCH_KEY_REQUIRED"))
221 | else:
222 | _handle_api_key_input(env_var, t("WEB_SEARCH_KEY_REQUIRED"))
223 |
224 | print(t("SEARCH_ENGINE_CONFIGURED", engine=engine_info['name']))
225 | return search_engine
226 |
227 | def restart_program():
228 | """Restart the current program with the same arguments"""
229 | print(t("LANGUAGE_CHANGED_RESTARTING"))
230 | python = sys.executable
231 | os.execl(python, python, *sys.argv)
232 |
233 | def get_user_choices():
234 | """Get all user choices in a single function"""
235 | # Get language preference
236 | get_language_choice()
237 |
238 | # Get backend language
239 | backend_language = get_backend_language()
240 |
241 | # Get model choice
242 | model = get_model_choice()
243 |
244 | # Get model format
245 | model_format = get_model_format(model)
246 |
247 | # Get search engine choice
248 | search_engine = get_search_engine()
249 |
250 | # Handle API key if needed
251 | handle_api_key_input(model, model_format)
252 |
253 | return backend_language, model, model_format, search_engine
254 |
--------------------------------------------------------------------------------
/docker/msdl/utils.py:
--------------------------------------------------------------------------------
1 | # msdl/utils.py
2 |
3 | import os
4 | import re
5 | import shutil
6 | import sys
7 | import yaml
8 | from functools import lru_cache
9 | from pathlib import Path
10 | from msdl.config import (
11 | BACKEND_DOCKERFILE_DIR,
12 | CLOUD_LLM_DOCKERFILE,
13 | FRONTEND_DOCKERFILE_DIR,
14 | LOCAL_LLM_DOCKERFILE,
15 | PACKAGE_DIR,
16 | REACT_DOCKERFILE,
17 | TEMP_DIR,
18 | ENV_FILE_PATH,
19 | )
20 | from msdl.i18n import t
21 |
22 |
23 | @lru_cache(maxsize=None)
24 | def get_env_variable(var_name, default=None):
25 | if ENV_FILE_PATH.exists():
26 | with ENV_FILE_PATH.open("r") as env_file:
27 | for line in env_file:
28 | if line.startswith(f"{var_name}="):
29 | return line.strip().split("=", 1)[1]
30 | return os.getenv(var_name, default)
31 |
32 |
33 | # Not cached: the .env file can be rewritten by save_api_key_to_env during the same run
34 | def get_existing_api_key(env_var_name):
35 | env_vars = read_env_file()
36 | return env_vars.get(env_var_name)
37 |
38 |
39 | # Not cached for the same reason: always re-read the current contents of the .env file
40 | def read_env_file():
41 | env_vars = {}
42 | if ENV_FILE_PATH.exists():
43 | with ENV_FILE_PATH.open("r") as env_file:
44 | for line in env_file:
45 | if "=" in line and not line.strip().startswith("#"):
46 | key, value = line.strip().split("=", 1)
47 | env_vars[key] = value.strip('"').strip("'")
48 | return env_vars
49 |
50 |
51 | def clean_api_key(api_key):
52 | cleaned_key = api_key.strip()
53 | cleaned_key = re.sub(r"\s+", "", cleaned_key)
54 | return cleaned_key
55 |
56 |
57 | @lru_cache(maxsize=None)
58 | def validate_api_key(api_key, key_type, t):
59 | basic_pattern = r"^sk-[A-Za-z0-9]+$"
60 | web_search_pattern = r"^[A-Za-z0-9_\-\.]+$"
61 | tencent_pattern = r"^[A-Za-z0-9]+$"
62 |
63 | validation_rules = {
64 | # Model API Keys
65 | "SILICON_API_KEY": basic_pattern,
66 | "OPENAI_API_KEY": basic_pattern,
67 | "QWEN_API_KEY": basic_pattern,
68 | # Search Engine API Keys
69 | "BING_SEARCH_API_KEY": web_search_pattern,
70 | "BRAVE_SEARCH_API_KEY": web_search_pattern,
71 | "GOOGLE_SERPER_API_KEY": web_search_pattern,
72 | "TENCENT_SEARCH_SECRET_ID": tencent_pattern,
73 | "TENCENT_SEARCH_SECRET_KEY": tencent_pattern,
74 | # Legacy support
75 | "WEB_SEARCH_API_KEY": web_search_pattern,
76 | }
77 |
78 | if key_type not in validation_rules:
79 | raise ValueError(t("UNKNOWN_API_KEY_TYPE", KEY_TYPE=key_type))
80 |
81 | pattern = validation_rules[key_type]
82 | return re.match(pattern, api_key) is not None
83 |
84 |
85 | def save_api_key_to_env(key_type, api_key, t):
86 | """Save API key to .env file
87 |
88 | Args:
89 | key_type: Environment variable name or model format
90 | api_key: API key value
91 | t: Translation function
92 | """
93 | # Convert model format to env var name if needed
94 | env_var_name = {
95 | "internlm_silicon": "SILICON_API_KEY",
96 | "gpt4": "OPENAI_API_KEY",
97 | "qwen": "QWEN_API_KEY",
98 | }.get(key_type, key_type) # If not a model format, use key_type directly
99 |
100 | if not validate_api_key(api_key, env_var_name, t):
101 | raise ValueError(t("INVALID_API_KEY", KEY_TYPE=env_var_name))
102 |
103 | env_vars = read_env_file()
104 | env_vars[env_var_name] = api_key
105 |
106 | with ENV_FILE_PATH.open("w") as env_file:
107 | for key, value in env_vars.items():
108 | env_file.write(f"{key}={value}\n")
109 |
110 | print(t("API_KEY_SAVED", ENV_VAR_NAME=env_var_name))
111 |
112 |
113 | def ensure_directory(path):
114 | path = Path(path)
115 | if not path.exists():
116 | path.mkdir(parents=True, exist_ok=True)
117 | print(t("DIR_CREATED", dir=path))
118 |
119 |
120 | def copy_templates_to_temp(template_files):
121 | template_dir = PACKAGE_DIR / "templates"
122 |
123 | ensure_directory(TEMP_DIR)
124 |
125 | for filename in template_files:
126 | src = template_dir / filename
127 | dst = TEMP_DIR / filename
128 | if src.exists():
129 | shutil.copy2(src, dst)
130 | print(t("FILE_COPIED", file=filename))
131 | else:
132 | print(t("FILE_NOT_FOUND", file=filename))
133 | sys.exit(1)
134 |
135 |
136 | def modify_docker_compose(model_type, backend_language, model_format, search_engine):
137 | """Modify docker-compose.yaml based on user choices"""
138 | docker_compose_path = os.path.join(TEMP_DIR, "docker-compose.yaml")
139 | with open(docker_compose_path, "r") as file:
140 | compose_data = yaml.safe_load(file)
141 |
142 | # Set the name of the project
143 | compose_data["name"] = "mindsearch"
144 |
145 | # Configure backend service
146 | backend_service = compose_data["services"]["backend"]
147 |
148 | # Set environment variables
149 | if "environment" not in backend_service:
150 | backend_service["environment"] = []
151 |
152 | # Add or update environment variables
153 | env_vars = {
154 | "LANG": backend_language,
155 | "MODEL_FORMAT": model_format,
156 | "SEARCH_ENGINE": search_engine
157 | }
158 |
159 | # Ensure .env file is included
160 | if "env_file" not in backend_service:
161 | backend_service["env_file"] = [".env"]
162 | elif ".env" not in backend_service["env_file"]:
163 | backend_service["env_file"].append(".env")
164 |
165 | # Set command with all parameters
166 | command = f"python -m mindsearch.app --lang {backend_language} --model_format {model_format} --search_engine {search_engine}"
167 | backend_service["command"] = command
168 |
169 | # Convert environment variables to docker-compose format
170 | backend_service["environment"] = [
171 | f"{key}={value}" for key, value in env_vars.items()
172 | ]
173 |
174 | # Configure based on model type
175 | if model_type == CLOUD_LLM_DOCKERFILE:
176 | if "deploy" in backend_service:
177 | del backend_service["deploy"]
178 | # Remove volumes for cloud deployment
179 | if "volumes" in backend_service:
180 | del backend_service["volumes"]
181 | elif model_type == LOCAL_LLM_DOCKERFILE:
182 | # Add GPU configuration for local deployment
183 | if "deploy" not in backend_service:
184 | backend_service["deploy"] = {
185 | "resources": {
186 | "reservations": {
187 | "devices": [
188 | {"driver": "nvidia", "count": 1, "capabilities": ["gpu"]}
189 | ]
190 | }
191 | }
192 | }
193 | # Add volume for cache in local deployment
194 | backend_service["volumes"] = ["/root/.cache:/root/.cache"]
195 | else:
196 | raise ValueError(t("UNKNOWN_DOCKERFILE", dockerfile=model_type))
197 |
198 | # Save the modified docker-compose.yaml
199 | with open(docker_compose_path, "w") as file:
200 | yaml.dump(compose_data, file)
201 |
202 |     print(t("PATHS_UPDATED"))
209 |
210 |
211 | def get_model_formats(model_type):
212 | if model_type == CLOUD_LLM_DOCKERFILE:
213 | return ["internlm_silicon", "qwen", "gpt4"]
214 | elif model_type == LOCAL_LLM_DOCKERFILE:
215 | return ["internlm_server", "internlm_client", "internlm_hf"]
216 | else:
217 | raise ValueError(t("UNKNOWN_MODEL_TYPE", model_type=model_type))
218 |
219 |
220 | def copy_backend_dockerfile(choice):
221 | """Copy backend Dockerfile to temp directory based on user choice"""
222 | source_file = Path(BACKEND_DOCKERFILE_DIR) / choice
223 | dest_file = "backend.dockerfile"
224 | source_path = PACKAGE_DIR / "templates" / source_file
225 | dest_path = TEMP_DIR / dest_file
226 |
227 | if not source_path.exists():
228 | raise FileNotFoundError(t("FILE_NOT_FOUND", file=source_file))
229 |
230 | dest_path.parent.mkdir(parents=True, exist_ok=True)
231 | dest_path.write_text(source_path.read_text())
232 | print(
233 | t(
234 | "BACKEND_DOCKERFILE_COPIED",
235 | source_path=str(source_path),
236 | dest_path=str(dest_path),
237 | ))
238 |
239 |
240 | def copy_frontend_dockerfile():
241 | """Copy frontend Dockerfile to temp directory"""
242 | source_file = Path(FRONTEND_DOCKERFILE_DIR) / REACT_DOCKERFILE
243 | dest_file = "frontend.dockerfile"
244 | source_path = PACKAGE_DIR / "templates" / source_file
245 | dest_path = TEMP_DIR / dest_file
246 |
247 | if not source_path.exists():
248 | raise FileNotFoundError(t("FILE_NOT_FOUND", file=source_file))
249 |
250 | dest_path.parent.mkdir(parents=True, exist_ok=True)
251 | dest_path.write_text(source_path.read_text())
252 | print(
253 | t(
254 | "FRONTEND_DOCKERFILE_COPIED",
255 | source_path=str(source_path),
256 | dest_path=str(dest_path),
257 | ))
258 |
--------------------------------------------------------------------------------
/docker/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import find_packages, setup
2 |
3 | setup(
4 | name="msdl",
5 | version="0.1.1",
6 | description="MindSearch Docker Launcher",
7 | packages=find_packages(),
8 | python_requires=">=3.7",
9 | install_requires=[
10 | "pyyaml>=6.0",
11 | "python-i18n>=0.3.9",
12 | "inquirerpy>=0.3.4",
13 | "python-dotenv>=0.19.1",
14 | ],
15 | entry_points={
16 | "console_scripts": [
17 | "msdl=msdl.__main__:main",
18 | ],
19 | },
20 | include_package_data=True,
21 | package_data={
22 | "msdl": ["translations/*.yaml", "templates/*"],
23 | },
24 | )
25 |
--------------------------------------------------------------------------------
/frontend/React/.gitignore:
--------------------------------------------------------------------------------
1 |
2 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
3 |
4 | # dependencies
5 | /node_modules
6 | /.pnp
7 | .pnp.js
8 |
9 | # testing
10 | /coverage
11 |
12 | # production
13 | /build
14 |
15 | # misc
16 | .DS_Store
17 | .env.local
18 | .env.development.local
19 | .env.test.local
20 | .env.production.local
21 |
22 | npm-debug.log*
23 | yarn-debug.log*
24 | yarn-error.log*
25 |
--------------------------------------------------------------------------------
/frontend/React/.prettierignore:
--------------------------------------------------------------------------------
1 | dist
2 | deploy
3 | values
4 | node_modules
5 | .gitignore
6 | .prettierignore
7 | .husky
--------------------------------------------------------------------------------
/frontend/React/.prettierrc.json:
--------------------------------------------------------------------------------
1 | {
2 | "printWidth": 120,
3 | "tabWidth": 4,
4 | "singleQuote": true,
5 | "quoteProps": "as-needed",
6 | "bracketSpacing": true
7 | }
8 |
--------------------------------------------------------------------------------
/frontend/React/README.md:
--------------------------------------------------------------------------------
1 | # Notice
2 | - If you leave the page (i.e., the page becomes hidden) and come back, the SSE connection will be re-established.
3 | - The project requires Node.js version >= 18.0.0.
4 |
5 | # Prepare your dev-environment for frontend
6 | [Node.js](https://nodejs.org/en)® is a free, open-source, cross-platform JavaScript runtime environment that lets developers create servers, web apps, command line tools and scripts.
7 |
8 | # Node.js Installation Guide (Windows, Linux, macOS)
9 | ## Windows Installation
10 | - Step 1: Download Node.js
11 |
12 | 1. Open your web browser and visit the [Node.js official website](https://nodejs.org/en).
13 |
14 | 2. Navigate to the "Downloads" section.
15 |
16 | 3. Select the desired version (LTS recommended for long-term stability). As of August 2024, the latest LTS version might be v20.x.x.
17 |
18 | 4. Click on the "Windows Installer (.msi)" link to download the installation package.
19 |
20 | - Step 2: Install Node.js
21 |
22 | 1. Double-click the downloaded .msi file to start the installation wizard.
23 |
24 | 2. Click "Next" to proceed.
25 |
26 | 3. Read and accept the license agreement by checking the "I accept the terms in the License Agreement" box.
27 |
28 | 4. Click "Next" again and select the installation directory. It's recommended to change the default location to avoid installing in the C drive.
29 |
30 | 5. Continue clicking "Next" to use the default settings until you reach the "Install" button.
31 |
32 | 6. Click "Install" to start the installation process.
33 |
34 | 7. Wait for the installation to complete and click "Finish" to exit the installation wizard.
35 |
36 | - Step 3: Verify Installation
37 | 1. Open the Command Prompt (cmd) by pressing `Win + R`, typing `cmd`, and pressing Enter.
38 | 2. Type `node -v` and press Enter. You should see the installed Node.js version displayed.
39 | 3. Type `npm -v` and press Enter to verify the installed npm version. npm is the package manager that comes bundled with Node.js.
40 |
41 | - Step 4: Configure npm Global Path (Optional)
42 | If you want to change the default global installation path for npm, follow these steps:
43 |
44 | 1. Open the Command Prompt (cmd) as an administrator.
45 |
46 | 2. Navigate to your Node.js installation directory (e.g., C:\Program Files\nodejs).
47 |
48 | 3. Create two new folders named node_global and node_cache.
49 |
50 | 4. Run the following commands to set the new paths:
51 |
52 | ```bash
53 | npm config set prefix "C:\Program Files\nodejs\node_global"
54 | npm config set cache "C:\Program Files\nodejs\node_cache"
55 | ```
56 |
57 | 5. Open the Environment Variables settings in the System Properties.
58 | 6. Add `C:\Program Files\nodejs\node_global` to the `PATH` variable under User Variables.
59 | 7. Optionally, create a new system variable named `NODE_PATH` and set its value to `C:\Program Files\nodejs\node_global\node_modules`.
60 |
61 | ## Linux Installation
62 | - Step 1: Update Your System
63 | Before installing Node.js, ensure your Linux system is up-to-date:
64 |
65 | ```bash
66 | sudo apt-get update
67 | sudo apt-get upgrade
68 | ```
69 |
70 | - Step 2: Install Dependencies
71 | Node.js requires certain dependencies to function properly:
72 |
73 | ```bash
74 | sudo apt-get install build-essential libssl-dev
75 | ```
76 |
77 | - Step 3: Download and Install Node.js
78 | You can download the Node.js source code or use a package manager like `curl` or `wget` to download a pre-built binary. For simplicity, this guide assumes you're using a package manager.
79 |
80 | 1. Navigate to the Node.js download page for package managers.
81 | Follow the instructions for your Linux distribution. For example, on Ubuntu, you can use:
82 |
83 | ```bash
84 | curl -fsSL https://deb.nodesource.com/setup_20.x | sudo -E bash -
85 | sudo apt-get install -y nodejs
86 | ```
87 |
88 | Replace 20.x with the desired version number if you don't want the latest version.
89 |
90 | - Step 4: Verify Installation
91 | 1. Open a terminal.
92 | 2. Type `node -v` and press Enter to check the Node.js version.
93 | 3. Type `npm -v` and press Enter to verify the npm version.
94 |
95 |
96 | ## Installing Node.js on macOS
97 |
98 | Installing Node.js on macOS is a straightforward process that can be accomplished using the official installer from the Node.js website or through package managers like Homebrew. This guide will cover both methods.
99 |
100 | ### Method 1: Using the Official Installer
101 | - Visit the Node.js Website
102 | - Open your web browser and navigate to https://nodejs.org/.
103 | - Download the Installer
104 | - Scroll down to the "Downloads" section.
105 | - Click on the "macOS Installer" button to download the .pkg file. Ensure you download the latest version, which as of August 2024, might be v20.x.x or higher.
106 | - Install Node.js
107 | - Once the download is complete, locate the .pkg file in your Downloads folder.
108 | - Double-click the file to start the installation process.
109 | - Follow the on-screen instructions. Typically, you'll need to agree to the license agreement, select an installation location (the default is usually fine), and click "Continue" or "Install" until the installation is complete.
110 | - Verify the Installation
111 | - Open the Terminal application by going to "Finder" > "Applications" > "Utilities" > "Terminal" or using Spotlight Search (press `Cmd + Space` and type "Terminal").
112 | - Type `node -v` and press Enter. This command should display the installed version of Node.js.
113 | - Type `npm -v` and press Enter to verify that npm, the Node.js package manager, is also installed.
114 |
115 | ### Method 2: Using Homebrew
116 | If you prefer to use a package manager, Homebrew is a popular choice for macOS.
117 |
118 | - Install Homebrew (if not already installed)
119 |
120 | - Open the Terminal.
121 |
122 | - Copy and paste the following command into the Terminal and press Enter:
123 | ```bash
124 | /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
125 | ```
126 |
127 | - Follow the on-screen instructions to complete the Homebrew installation.
128 |
129 | - Install Node.js with Homebrew
130 |   - Once Homebrew is installed, update your package list by running `brew update` in the Terminal.
131 | - To install Node.js, run the following command in the Terminal:
132 | ```bash
133 | brew install node
134 | ```
135 | - Homebrew will download and install the latest version of Node.js and npm.
136 | - Verify the Installation
137 |   - As with the official installer method, you can verify the installation by typing `node -v` and `npm -v` in the Terminal and pressing Enter.
138 |
139 | ### Additional Configuration (Optional)
140 | - Configure npm's Global Installation Path (if desired):
141 | - You may want to change the default location where globally installed npm packages are stored. Follow the steps outlined in the Node.js documentation or search for guides online to configure this.
142 | - Switch to a Different Node.js Version (if needed):
143 | - If you need to switch between multiple Node.js versions, consider using a version manager like nvm (Node Version Manager). Follow the instructions on the nvm GitHub page to install and use it.
144 |
145 |
146 | By following these steps, you should be able to successfully install Node.js on your system. Remember to keep your Node.js and npm versions up-to-date to take advantage of the latest features and security updates.
147 |
148 | If your environment is already prepared, continue with the installation and setup instructions below.
149 |
150 | # Installation and Setup Instructions
151 |
152 | ## Installation
153 | ```
154 | npm install
155 | ```
156 |
157 | ## Start Server
158 | ```
159 | npm start
160 | ```
161 |
162 | ## Visit Server
163 | ```
164 | http://localhost:8080
165 | ```
166 |
167 | Pay attention to the actual port shown in your terminal; it may not be 8080.
168 |
169 | # Config
170 | ## How to modify the request URL
171 |
172 | - Open the file `vite.config.ts` and modify the proxy `target`, for example:
173 |
174 | ```
175 | server: {
176 | port: 8080,
177 | proxy: {
178 | "/solve": {
179 | target: "{HOST}:{PORT}",
180 | changeOrigin: true,
181 | }
182 | }
183 | }
184 | ```
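185 |
186 | For example, assuming the MindSearch backend (started via `python -m mindsearch.app`, as in the Docker setup) is reachable locally on port 8002, the proxy section might look like the sketch below; substitute the host and port your backend actually uses:
187 |
188 | ```
189 | server: {
190 |   port: 8080,
191 |   proxy: {
192 |     "/solve": {
193 |       // hypothetical local backend address; replace with your actual host and port
194 |       target: "http://127.0.0.1:8002",
195 |       changeOrigin: true,
196 |     }
197 |   }
198 | }
199 | ```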
--------------------------------------------------------------------------------
/frontend/React/README_zh-CN.md:
--------------------------------------------------------------------------------
1 | # Notice
2 | 问题回答过程中离开页面后再回到页面,会导致sse重连!
3 | # 开始
4 | ## 请使用大于18.0.0的node版本
5 | ## 准备node.js开发环境
6 | Node.js 是一个基于 Chrome V8 引擎的 JavaScript 运行环境,允许你在服务器端运行 JavaScript。以下是在 Windows、Linux 和 macOS 上安装 Node.js 的详细步骤。
7 |
8 | ### 在 Windows 上安装 Node.js
9 | - 步骤 1: 访问 Node.js 官网
10 |
11 | 打开浏览器,访问 [Node.js](https://nodejs.org/zh-cn/download/prebuilt-installer) 官方网站。
12 |
13 | - 步骤 2: 下载 Node.js 安装包
14 |
15 | 选择你需要的nodejs版本,设备的类型,点击下载,示例如下图:
16 | 
17 |
18 | - 步骤 3: 安装 Node.js
19 |
20 | 双击下载的安装包开始安装。
21 |
22 | 跟随安装向导的指示进行安装。在安装过程中,你可以选择安装位置、是否将 Node.js 添加到系统 PATH 环境变量等选项。推荐选择“添加到 PATH”以便在任何地方都能通过命令行访问 Node.js。
23 | 安装完成后,点击“Finish”结束安装。
24 |
25 | - 步骤 4: 验证安装
26 |
27 | 打开命令提示符(CMD)或 PowerShell。
28 | 输入 node -v 并回车,如果系统返回了 Node.js 的版本号,说明安装成功。
29 | 接着,输入 npm -v 并回车,npm 是 Node.js 的包管理器,如果返回了版本号,表示 npm 也已正确安装。
30 |
31 | ### 在 Linux 上安装 Node.js
32 | 注意: 由于 Linux 发行版众多,以下以 Ubuntu 为例说明,其他发行版(如 CentOS、Debian 等)的安装方式可能略有不同,可自行查询对应的安装办法。
33 |
34 | - 步骤 1: 更新你的包管理器
35 |
36 | 打开终端。
37 |
38 | 输入 sudo apt update 并回车,以更新 Ubuntu 的包索引。
39 |
40 | - 步骤 2: 安装 Node.js
41 |
42 | 对于 Ubuntu 18.04 及更高版本,Node.js 可以直接从 Ubuntu 的仓库中安装。
43 | 输入 sudo apt install nodejs npm 并回车。
44 | 对于旧版本的 Ubuntu 或需要安装特定版本的 Node.js,你可能需要使用如 NodeSource 这样的第三方仓库。
45 |
46 | - 步骤 3: 验证安装
47 |
48 | 在终端中,输入 node -v 和 npm -v 来验证 Node.js 和 npm 是否已正确安装。
49 |
50 | ### 在 macOS 上安装 Node.js
51 |
52 | #### 下载安装
53 | - 步骤 1: 访问 Node.js 官网
54 |
55 | 打开浏览器,访问 Node.js 官方网站。
56 |
57 | - 步骤 2: 下载 Node.js 安装包
58 |
59 | 在首页找到 macOS 对应的安装包(通常是 .pkg 文件),点击下载。
60 |
61 | - 步骤 3: 安装 Node.js
62 |
63 | 找到下载的 .pkg 文件,双击打开。
64 | 跟随安装向导的指示进行安装。
65 | 安装完成后,点击“Close”结束安装。
66 |
67 | - 步骤 4: 验证安装
68 |
69 | 打开终端。
70 |
71 | 输入 node -v 和 npm -v 来验证 Node.js 和 npm 是否已正确安装。
72 |
73 | #### 使用HomeBrew安装
74 | 前提条件:确保你的macOS上已经安装了Homebrew。如果尚未安装,可以通过以下命令进行安装(以终端操作为例):
75 | ```
76 | /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
77 | ```
78 | 按照提示输入密码以确认安装。安装过程中,可能需要你同意许可协议等。
79 |
80 | - 打开终端:
81 | 在macOS上找到并打开“终端”应用程序。
82 |
83 | - 使用Homebrew安装Node.js:
84 | 在终端中输入以下命令来安装最新版本的Node.js
85 | ```
86 | brew install node
87 | ```
88 | Homebrew会自动下载Node.js的安装包,并处理相关的依赖项和安装过程。你需要等待一段时间,直到安装完成。
89 |
90 | - 验证安装:
91 | 安装完成后,你可以通过输入以下命令来验证Node.js是否成功安装:
92 | ```
93 | node -v
94 | ```
95 | 如果终端输出了Node.js的版本号,那么表示安装成功。同时,你也可以通过输入npm -v来验证npm(Node.js的包管理器)是否也成功安装。
96 |
97 | 完成以上步骤后,你应该能在你的 Windows、Linux 或 macOS 系统上成功安装并运行 Node.js。
98 |
99 | ### 更多
100 | 如需了解更多,可参照:https://nodejs.org/en
101 |
102 | 如环境已经准备好,跳转下一步
103 |
104 | ## 安装依赖
105 | 进入前端项目根目录
106 | ```
107 | npm install
108 | ```
109 |
110 | ## 启动
111 | ```
112 | npm start
113 | ```
114 |
115 | 启动成功后,界面将出现可访问的本地url
116 |
117 | ## 配置
118 | ### 接口请求配置
119 |
120 | - 在vite.config.ts中配置proxy,示例如下:
121 |
122 | ```
123 | server: {
124 | port: 8080,
125 | proxy: {
126 | "/solve": {
127 | target: "{HOST}:{PORT}",
128 | changeOrigin: true,
129 | }
130 | }
131 | }
132 | ```
133 |
134 | ## 知悉
135 | - 前端服务基于react开发,如需了解react相关知识,可参考:https://react.dev/
--------------------------------------------------------------------------------
/frontend/React/index.html:
--------------------------------------------------------------------------------
(HTML markup omitted)
--------------------------------------------------------------------------------
/frontend/React/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "test-react-flow",
3 | "private": true,
4 | "version": "0.0.0",
5 | "type": "module",
6 | "scripts": {
7 | "start": "vite --host --mode dev",
8 | "start:dev": "vite --host --mode dev",
9 | "start:staging": "vite --host --mode staging",
10 | "start:prod": "vite --host --mode production",
11 | "build": "tsc && vite build",
12 | "build:dev": "tsc && vite build --mode dev",
13 | "build:staging": "tsc && vite build --mode staging",
14 | "build:prod": "tsc && vite build --mode production",
15 | "preview": "vite preview",
16 | "prettier": "prettier --write ."
17 | },
18 | "devDependencies": {
19 | "@babel/plugin-proposal-optional-chaining": "^7.21.0",
20 | "@types/classnames": "^2.3.1",
21 | "@types/js-cookie": "^3.0.3",
22 | "@types/node": "^18.15.11",
23 | "@types/react": "^18.0.28",
24 | "@types/react-dom": "^18.0.11",
25 | "@vitejs/plugin-legacy": "^4.0.2",
26 | "@vitejs/plugin-react": "^3.1.0",
27 | "husky": "^9.0.11",
28 | "less": "^4.1.3",
29 | "lint-staged": "^15.2.7",
30 | "prettier": "^3.0.0",
31 | "react": "^18.2.0",
32 | "react-dom": "^18.2.0",
33 | "terser": "^5.16.9",
34 | "typescript": "^4.9.3",
35 | "vite": "^4.2.1",
36 | "vite-babel-plugin": "^0.0.2"
37 | },
38 | "dependencies": {
39 | "@antv/x6": "^2.18.1",
40 | "@microsoft/fetch-event-source": "^2.0.1",
41 | "antd": "^5.18.3",
42 | "axios": "^1.3.5",
43 | "classnames": "^2.5.1",
44 | "elkjs": "^0.9.3",
45 | "js-cookie": "^3.0.1",
46 | "react-markdown": "^9.0.1",
47 | "react-router": "^6.11.2",
48 | "react-router-dom": "^6.11.2",
49 | "reactflow": "^11.11.3",
50 | "rehype-raw": "^7.0.0"
51 | },
52 | "lint-staged": {
53 | "**/*.{ts, tsx, less, module.less, json, md, .html}": "prettier --write ."
54 | }
55 | }
56 |
--------------------------------------------------------------------------------
/frontend/React/src/App.module.less:
--------------------------------------------------------------------------------
1 | .app {
2 | height: 100%;
3 | display: flex;
4 | justify-content: space-between;
5 | background: url(./assets/background.png) rgb(247, 248, 255);
6 | background-size: cover;
7 | overflow: hidden;
8 | }
9 |
10 | .content {
11 | padding: 64px 0 16px 0;
12 | width: 100%;
13 | height: 100%;
14 | box-sizing: border-box;
15 | }
16 |
17 | .header {
18 | position: fixed;
19 | padding: 16px 32px;
20 | width: 100%;
21 | display: flex;
22 | align-items: center;
23 | box-sizing: border-box;
24 |
25 | &-nav {
26 | flex: 1;
27 |
28 | img {
29 | height: 40px;
30 | }
31 |
32 | a {
33 | display: inline-block;
34 | text-decoration: none;
35 | color: black;
36 |
37 | &:not(:first-of-type) {
38 | margin-left: 40px;
39 | }
40 |
41 | &.active {
42 | font-weight: bold;
43 | }
44 | }
45 | }
46 |
47 | &-opt {
48 | flex-shrink: 0;
49 | display: flex;
50 | align-items: center;
51 | }
52 | }
--------------------------------------------------------------------------------
/frontend/React/src/App.tsx:
--------------------------------------------------------------------------------
1 | import style from "./App.module.less";
2 |
3 | import { BrowserRouter } from "react-router-dom";
4 | import RouterRoutes from "@/routes/routes";
5 | import Logo from "@/assets/logo.svg";
6 |
7 | function App() {
8 | return (
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 | );
23 | }
24 |
25 | export default App;
26 |
--------------------------------------------------------------------------------
/frontend/React/src/assets/background.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/InternLM/MindSearch/bfb41be176f95700bcf199be69b001096425bbbd/frontend/React/src/assets/background.png
--------------------------------------------------------------------------------
/frontend/React/src/assets/fold-icon.svg:
--------------------------------------------------------------------------------
(SVG markup omitted)
--------------------------------------------------------------------------------
/frontend/React/src/assets/pack-up.svg:
--------------------------------------------------------------------------------
(SVG markup omitted)
--------------------------------------------------------------------------------
/frontend/React/src/assets/sendIcon.svg:
--------------------------------------------------------------------------------
(SVG markup omitted)
--------------------------------------------------------------------------------
/frontend/React/src/assets/show-right-icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/InternLM/MindSearch/bfb41be176f95700bcf199be69b001096425bbbd/frontend/React/src/assets/show-right-icon.png
--------------------------------------------------------------------------------
/frontend/React/src/assets/unflod-icon.svg:
--------------------------------------------------------------------------------
(SVG markup omitted)
--------------------------------------------------------------------------------
/frontend/React/src/global.d.ts:
--------------------------------------------------------------------------------
1 | declare module 'event-source-polyfill';
--------------------------------------------------------------------------------
/frontend/React/src/index.less:
--------------------------------------------------------------------------------
1 | body,
2 | html,
3 | #root {
4 | padding: 0;
5 | margin: 0;
6 | width: 100%;
7 | height: 100%;
8 | font-family: "PingFang SC";
9 | font-size: 14px;
10 | line-height: 21px;
11 | }
12 |
13 | #global__message-container {
14 | position: fixed;
15 | left: 0;
16 | right: 0;
17 | top: 72px;
18 | z-index: 999;
19 | display: flex;
20 | flex-direction: column;
21 | justify-content: center;
22 | align-items: center;
23 | }
24 |
25 | .f {
26 | color: #6674D6;
27 | font-family: DIN;
28 | font-size: 12px;
29 | font-style: normal;
30 | font-weight: 500;
31 | line-height: 14px;
32 | position: relative;
33 | top: -4px;
34 | padding: 0 3px;
35 |
36 | &::after {
37 | content: '·';
38 | position: absolute;
39 | top: 0;
40 | right: -2px;
41 | color: #6674D6;
42 | }
43 | }
44 |
45 | p> :nth-last-child(1).f,
46 | li> :nth-last-child(1).f {
47 | &::after {
48 | content: '';
49 | opacity: 0;
50 | }
51 | }
52 |
53 | .fnn2 {
54 | color: #6674D6;
55 | font-family: DIN;
56 | font-size: 14px;
57 | font-style: normal;
58 | font-weight: 500;
59 | line-height: 14px;
60 | position: relative;
61 | top: -2px;
62 | }
--------------------------------------------------------------------------------
/frontend/React/src/index.tsx:
--------------------------------------------------------------------------------
1 | import React from "react";
2 | import ReactDOM from "react-dom/client";
3 | import "./index.less";
4 | import App from "./App";
5 |
6 | ReactDOM.createRoot(document.getElementById("root") as HTMLElement).render(
7 |   <React.StrictMode>
8 |     <App />
9 |   </React.StrictMode>,
10 | );
11 |
--------------------------------------------------------------------------------
/frontend/React/src/pages/mindsearch/assets/bookmark-icon.svg:
--------------------------------------------------------------------------------
(SVG markup omitted)
--------------------------------------------------------------------------------
/frontend/React/src/pages/mindsearch/assets/fold-icon.svg:
--------------------------------------------------------------------------------
(SVG markup omitted)
--------------------------------------------------------------------------------
/frontend/React/src/pages/mindsearch/assets/mindsearch-avatar.svg:
--------------------------------------------------------------------------------
(SVG markup omitted)
--------------------------------------------------------------------------------
/frontend/React/src/pages/mindsearch/assets/pack-up-disabled.svg:
--------------------------------------------------------------------------------
(SVG markup omitted)
--------------------------------------------------------------------------------
/frontend/React/src/pages/mindsearch/assets/pack-up.svg:
--------------------------------------------------------------------------------
(SVG markup omitted)
--------------------------------------------------------------------------------
/frontend/React/src/pages/mindsearch/assets/sendIcon.svg:
--------------------------------------------------------------------------------
(SVG markup omitted)
--------------------------------------------------------------------------------
/frontend/React/src/pages/mindsearch/assets/think-progress-icon.svg:
--------------------------------------------------------------------------------
(SVG markup omitted)
--------------------------------------------------------------------------------
/frontend/React/src/pages/mindsearch/assets/unflod-icon.svg:
--------------------------------------------------------------------------------
(SVG markup omitted)
--------------------------------------------------------------------------------
/frontend/React/src/pages/mindsearch/components/answer/index.module.less:
--------------------------------------------------------------------------------
1 | .answer {
2 | display: flex;
3 | justify-content: flex-start;
4 | align-items: flex-start;
5 | width: 100%;
6 |
7 | .avatar {
8 | width: 32px;
9 | height: 32px;
10 | margin-right: 16px;
11 | border-radius: 50%;
12 | flex-shrink: 0;
13 |
14 | img {
15 | width: 100%;
16 | }
17 | }
18 |
19 | .reaponseAarea {
20 | display: flex;
21 | flex-direction: column;
22 | width: calc(100% - 48px);
23 | background-color: #F4F5F9;
24 | padding: 12px 16px;
25 | border-radius: 16px;
26 | overflow-x: hidden;
27 | }
28 |
29 | .inner {
30 | width: 100%;
31 | overflow-x: hidden;
32 | background-color: #fff;
33 | border-radius: 16px;
34 | border: 1px solid var(----line-2, #EBECF0);
35 | box-sizing: border-box;
36 | transition: all 0.5s ease;
37 | margin-bottom: 16px;
38 | position: relative;
39 | }
40 |
41 | .graphIcon {
42 | padding: 2px 8px;
43 | display: flex;
44 | justify-content: center;
45 | align-items: center;
46 | border-radius: 8px;
47 | border: 1px solid var(----line-2, #EBECF0);
48 | background: var(---fill-0, #FFF);
49 | color: #121316CC;
50 | font-size: 14px;
51 | line-height: 24px;
52 | cursor: pointer;
53 |
54 | svg {
55 | margin-left: 4px;
56 | }
57 |
58 | &:hover {
59 | background-color: #D7D8DD;
60 | color: #121316CC;
61 |
62 | svg path {
63 | fill: #121316CC;
64 | }
65 | }
66 | }
67 |
68 | .showGraph {
69 | width: 118px;
70 | margin-bottom: 16px;
71 | border-radius: 8px;
72 | border: 1px solid var(----line-2, #EBECF0);
73 | background: var(---fill-0, #FFF);
74 | }
75 |
76 | .closeGraph {
77 | position: absolute;
78 | right: 12px;
79 | bottom: 12px;
80 | }
81 |
82 | .reaponse {
83 | color: #121316;
84 | font-size: 14px;
85 | line-height: 24px;
86 | padding: 18px 42px;
87 | }
88 |
89 | // h3 {
90 | // font-size: 24px;
91 | // font-weight: 600;
92 | // line-height: 36px;
93 | // margin: 0 0 16px 0;
94 | // }
95 |
96 | // h4 {
97 | // font-size: 20px;
98 | // font-weight: 600;
99 | // line-height: 30px;
100 | // margin: 0 0 8px 0;
101 | // }
102 | }
103 |
104 | .draft {
105 | width: 100%;
106 | white-space: wrap;
107 | display: flex;
108 | justify-content: flex-start;
109 | align-items: flex-start;
110 | }
--------------------------------------------------------------------------------
/frontend/React/src/pages/mindsearch/components/answer/index.tsx:
--------------------------------------------------------------------------------
1 | import styles from './index.module.less';
2 | import MindMapGraph from '../mind-map';
3 | import { MindsearchContext } from '../../provider/context';
4 | import MindSearchAvatar from '../../assets/mindsearch-avatar.svg';
5 | import CustomMarkdown from '../custom-markdown';
6 | import { useState, useEffect, useContext } from 'react';
7 | import classNames from 'classnames';
8 |
9 | interface IProps {
10 | adjList: any;
11 | isEnd: boolean;
12 | response: string;
13 | refList: any;
14 | listId: number;
15 | question: string;
16 | handleNodeClick: (info: any, idx: number) => void;
17 | }
18 |
19 | const Answer = ({ refList = null, adjList, isEnd, response = '', listId, handleNodeClick, question = '' }: IProps) => {
20 | const { chatIsOver } = useContext(MindsearchContext);
21 | const [showGraph, setShowGraph] = useState(true);
22 |   // the overall render tree for the mind map
23 | const [renderData, setRenderData] = useState([]);
24 |
25 | const toggleGraph = () => {
26 | setShowGraph(!showGraph);
27 | };
28 |
29 | const handleClick = (node: string) => {
30 | handleNodeClick(node, listId);
31 | };
32 |
33 | const generateMapData = (arr: []) => {
34 | const tempArr: any[] = arr.map((item: { name: string; id: number; state: number }) => {
35 | if (item.name && adjList[item.name]) {
36 | return {
37 | ...item,
38 | children: generateMapData(adjList?.[item.name]),
39 | };
40 | }
41 | });
42 | return tempArr;
43 | };
44 |
45 | const convertTreeData = () => {
46 | const root: any = {
47 | id: 0,
48 | name: '原始问题',
49 | state: 3,
50 | children: generateMapData(adjList?.root || []),
51 | };
52 |
53 |     // build an array containing the root node
54 | // console.log('renderData-----------', [root]);
55 | setRenderData([root]);
56 | };
57 |
58 | useEffect(() => {
59 | if (!adjList || Object.keys(adjList)?.length < 2) {
60 | setRenderData([]);
61 | return;
62 | };
63 | convertTreeData();
64 | }, [adjList]);
65 |
66 | return
67 |
68 |
69 |
70 |
71 | {
72 | showGraph ? <>
73 | {
74 | (renderData?.length > 0) &&
75 |
82 |
83 | 收起
84 |
85 |
86 |
87 |
88 |
89 | }
90 | >
91 | :
92 | 查看思考节点
93 |
94 |
95 |
96 |
97 | }
98 |
99 | {response && (
100 |
101 |
102 |
103 | )}
104 |
105 |
106 | };
107 |
108 | export default Answer;
109 |
--------------------------------------------------------------------------------
/frontend/React/src/pages/mindsearch/components/answer/loading-animation/index.module.less:
--------------------------------------------------------------------------------
1 | .loading,
2 | .loading > div {
3 | position: relative;
4 | box-sizing: border-box;
5 | }
6 |
7 | .loading {
8 | display: flex;
9 | justify-content: center;
10 | align-items: center;
11 | font-size: 0;
12 | color: #fff;
13 | background-color: #f90;
14 | width: 20px;
15 | height: 20px;
16 | border-radius: 50%;
17 | margin-right: 3px;
18 | flex-shrink: 0;
19 | margin-top: 4px;
20 | }
21 |
22 | .loading > div {
23 | display: inline-block;
24 | float: none;
25 | background-color: currentColor;
26 | border: 0 solid currentColor;
27 | }
28 |
29 | .loading > div:nth-child(1) {
30 | animation-delay: -200ms;
31 | }
32 |
33 | .loading > div:nth-child(2) {
34 | animation-delay: -100ms;
35 | }
36 |
37 | .loading > div:nth-child(3) {
38 | animation-delay: 0ms;
39 | }
40 |
41 | .loading > div {
42 | width: 3px;
43 | height: 3px;
44 | margin: 2px 1px;
45 | border-radius: 100%;
46 | animation: ball-pulse 1s ease infinite;
47 | }
48 |
--------------------------------------------------------------------------------
/frontend/React/src/pages/mindsearch/components/answer/loading-animation/index.tsx:
--------------------------------------------------------------------------------
1 | import styles from './index.module.less';
2 |
3 | const LoadingAnimation = () => {
4 | return (
5 |     <div className={styles.loading}>
6 |       <div></div>
7 |       <div></div>
8 |       <div></div>
9 |     </div>
10 | );
11 | };
12 |
13 | export default LoadingAnimation;
14 |
--------------------------------------------------------------------------------
/frontend/React/src/pages/mindsearch/components/chat-right/components/empty-placeholder/index.module.less:
--------------------------------------------------------------------------------
1 | .emptyDiv {
2 | width: 280px;
3 | height: 100%;
4 | margin: auto;
5 | display: flex;
6 | justify-content: center;
7 | align-items: center;
8 | flex-direction: column;
9 |
10 | .pic {
11 | margin-bottom: 8px;
12 | }
13 |
14 | p {
15 | color: var(--80-text-4, rgba(18, 19, 22, 0.80));
16 | text-align: center;
17 | font-feature-settings: 'liga' off, 'clig' off;
18 |
19 | /* 段落正文/常规text-1-paragraph-regular */
20 | font-family: "PingFang SC";
21 | font-size: 14px;
22 | font-style: normal;
23 | font-weight: 400;
24 | line-height: 24px;
25 | /* 171.429% */
26 | }
27 | }
--------------------------------------------------------------------------------
/frontend/React/src/pages/mindsearch/components/chat-right/components/empty-placeholder/index.tsx:
--------------------------------------------------------------------------------
1 | import styles from './index.module.less';
2 | import EmptyRightChatImg from '../../../../assets/empty-chat-right.svg';
3 |
4 | const EmptyPlaceHolder = () => {
5 | return <>
6 |
7 |
8 |
9 |
10 |
11 | 请在节点图中选择节点后查看哦~
12 |
13 |
14 | >
15 | };
16 |
17 | export default EmptyPlaceHolder;
--------------------------------------------------------------------------------
/frontend/React/src/pages/mindsearch/components/chat-right/components/query-item/index.module.less:
--------------------------------------------------------------------------------
1 | .query {
2 | &-Item {
3 | display: inline-flex;
4 | padding: 6px 12px 6px 0;
5 | margin-right: 12px;
6 | margin-bottom: 8px;
7 | color: rgba(18, 19, 22, 0.8);
8 | font-size: 14px;
9 | line-height: 24px;
10 | box-sizing: border-box;
11 | overflow: hidden;
12 | position: relative;
13 |
14 | &:last-child {
15 | &::after {
16 | display: none;
17 | }
18 | }
19 |
20 | &::after {
21 | position: absolute;
22 | right: 0;
23 | top: 10px;
24 | width: 1px;
25 | height: 16px;
26 | border-right: 1px solid #ebecf0;
27 | content: '';
28 | }
29 | }
30 | }
--------------------------------------------------------------------------------
/frontend/React/src/pages/mindsearch/components/chat-right/components/query-item/index.tsx:
--------------------------------------------------------------------------------
1 | import classNames from 'classnames';
2 | import styles from './index.module.less';
3 |
4 | interface IQueryItemProps {
5 | item: string;
6 | }
7 | const QueryItem = ({ item }: IQueryItemProps) => {
8 | return {item}
;
9 | };
10 |
11 | export default QueryItem;
12 |
--------------------------------------------------------------------------------
/frontend/React/src/pages/mindsearch/components/chat-right/components/search-item/index.module.less:
--------------------------------------------------------------------------------
1 | .searchItem {
2 | border-radius: 12px;
3 | margin-bottom: 4px;
4 | padding: 8px;
5 | transition: all 0.5s ease-in-out;
6 | display: flex;
7 | justify-content: flex-start;
8 | align-items: flex-start;
9 | cursor: pointer;
10 |
11 | .inner {
12 | width: 100%;
13 | overflow: hidden;
14 | }
15 |
16 | &:hover {
17 | background-color: #ebecf0;
18 | }
19 |
20 | .num {
21 | color: var(--60-text-3, rgba(18, 19, 22, 0.6));
22 | font-size: 12px;
23 | font-weight: 600;
24 | line-height: 18px;
25 | margin-right: 7px;
26 | }
27 |
28 | p {
29 | white-space: wrap;
30 | max-width: 95%;
31 | overflow: hidden;
32 | text-overflow: ellipsis;
33 | margin: 0 !important;
34 | }
35 |
36 | // .origin {
37 | // display: flex;
38 | // justify-content: flex-start;
39 | // align-items: center;
40 | // margin-bottom: 2px;
41 |
42 | // .icon {
43 | // width: 16px;
44 | // height: 16px;
45 | // border-radius: 4px;
46 | // background-color: #6f7f9b;
47 | // margin-right: 2px;
48 | // }
49 |
50 | // span {
51 | // color: var(--60-text-3, rgba(18, 19, 22, 0.60));
52 | // font-size: 12px;
53 | // font-weight: 400;
54 | // line-height: 18px;
55 | // }
56 | // }
57 |
58 | p.title {
59 | overflow: hidden;
60 | color: var(---Brand1-5, #3477eb);
61 | text-overflow: ellipsis;
62 | font-size: 14px;
63 | line-height: 24px;
64 | margin-bottom: 2px;
65 | font-weight: normal;
66 |
67 | a {
68 | text-decoration: none;
69 | color: var(---Brand1-5, #3477eb);
70 | font-weight: normal;
71 | }
72 | }
73 |
74 | p.url {
75 | color: var(--60-text-3, rgba(18, 19, 22, 0.6));
76 | font-size: 12px;
77 | line-height: 18px;
78 | height: 18px;
79 | overflow: hidden;
80 | }
81 |
82 | p.summ {
83 | color: rgba(18, 19, 22, 0.8);
84 | font-size: 13px;
85 | line-height: 20px;
86 | white-space: wrap;
87 | display: -webkit-box;
88 | -webkit-box-orient: vertical;
89 | -webkit-line-clamp: 2;
90 | overflow: hidden;
91 | text-overflow: ellipsis;
92 | }
93 |
94 | &.highLight {
95 | background: var(--brand-11, #e6f2ff);
96 |
97 | &:hover {
98 | background-color: #b3d6ff;
99 | }
100 | }
101 | }
102 |
--------------------------------------------------------------------------------
/frontend/React/src/pages/mindsearch/components/chat-right/components/search-item/index.tsx:
--------------------------------------------------------------------------------
1 | import classNames from 'classnames';
2 | import styles from './index.module.less';
3 |
4 | interface ISearchItemProps {
5 | item: any;
6 | }
7 |
8 | const SearchItem = ({ item }: ISearchItemProps) => {
9 | const openLink = (url: string) => {
10 | window.open(url);
11 | };
12 | return (
13 | {
17 | openLink(item.url);
18 | }}
19 | >
20 |
{item.id}
21 |
22 |
{item?.url}
23 |
{item?.title}
24 |
{item?.summ}
25 |
26 |
27 | );
28 | };
29 |
30 | export default SearchItem;
31 |
--------------------------------------------------------------------------------
/frontend/React/src/pages/mindsearch/components/chat-right/index.module.less:
--------------------------------------------------------------------------------
1 | .rightContent {
2 | width: 44.44%;
3 | max-width: 800px;
4 | flex-shrink: 0;
5 | box-sizing: border-box;
6 | padding: 24px 0 24px 24px;
7 | border-radius: 16px;
8 | border: 1px solid var(----line-2, #ebecf0);
9 | background: var(---fill-0, #fff);
10 | height: 100%;
11 | overflow: hidden;
12 | position: relative;
13 | display: flex;
14 | justify-content: flex-start;
15 | flex-direction: column;
16 |
17 | .toggleIcon {
18 | position: absolute;
19 | right: 24px;
20 | top: 28px;
21 | cursor: pointer;
22 | }
23 |
24 | .titleNode {
25 | color: #121316;
26 | font-size: 24px;
27 | font-weight: 600;
28 | line-height: 30px;
29 | margin-bottom: 32px;
30 | max-width: calc(100% - 40px);
31 | }
32 |
33 | .nodeInfo {
34 | height: 100%;
35 | overflow-y: auto;
36 | padding-right: 24px;
37 |
38 | &.forbidScroll {
39 | overflow-y: hidden;
40 | }
41 |
42 | &::-webkit-scrollbar {
43 | width: 6px;
44 | }
45 |
46 | &::-webkit-scrollbar-track {
47 | background-color: rgba(255, 255, 255, 0);
48 | border-radius: 100px;
49 | }
50 |
51 | &::-webkit-scrollbar-thumb {
52 | background-color: rgba(255, 255, 255, 0);
53 | border-radius: 100px;
54 | }
55 | }
56 |
57 | .conclusion {
58 | padding-top: 8px;
59 | color: #121316;
60 | font-size: 14px;
61 | line-height: 24px;
62 |
63 | ul {
64 | padding-left: 24px;
65 | }
66 | }
67 |
68 | .steps {
69 | .title {
70 | color: var(--100-text-5, #121316);
71 | font-size: 20px;
72 | font-weight: 600;
73 | line-height: 30px;
74 | display: flex;
75 | justify-content: flex-start;
76 | align-items: center;
77 | position: relative;
78 |
79 | .open {
80 | position: absolute;
81 | right: 0;
82 | font-size: 20px;
83 | font-weight: normal;
84 | cursor: pointer;
85 |
86 | span {
87 | color: #121316;
88 | // opacity: 0.6;
89 | }
90 | }
91 |
92 | i {
93 | width: 12px;
94 | height: 12px;
95 | border-radius: 50%;
96 | background-color: #3477EB;
97 | margin-right: 12px;
98 | }
99 | }
100 |
101 | &.thinking,
102 | &.select {
103 | margin-bottom: 24px;
104 | }
105 |
106 | &.select {
107 | .searchList {
108 | margin-top: 0 !important;
109 | border-radius: 8px;
110 | background: var(--fill-2, #f4f5f9);
111 | padding: 8px;
112 | }
113 | }
114 |
115 | .con {
116 | margin-left: 5px;
117 | padding-top: 12px;
118 | padding-left: 15px;
119 | margin-bottom: 24px;
120 | border-left: 1px solid #D7D8DD;
121 | height: auto;
122 |
123 | &.limitHeight {
124 | max-height: calc(100vh - 320px);
125 | overflow-y: auto;
126 |
127 | &::-webkit-scrollbar {
128 | width: 6px;
129 | }
130 |
131 | &::-webkit-scrollbar-track {
132 | background-color: rgba(255, 255, 255, 0);
133 | border-radius: 100px;
134 | }
135 |
136 | &::-webkit-scrollbar-thumb {
137 | background-color: rgba(255, 255, 255, 0);
138 | border-radius: 100px;
139 | }
140 | }
141 |
142 | .draft {
143 | margin-bottom: 20px;
144 | }
145 |
146 | p {
147 | margin: 0;
148 | line-height: 24px;
149 | }
150 |
151 | &.collapsed {
152 | overflow: hidden;
153 | height: 0;
154 | padding-top: 24px;
155 | margin-bottom: 0 !important;
156 |
157 | // transition: all 1s;
158 |
159 | }
160 | }
161 |
162 | &:last-child {
163 | .collapsed {
164 | padding-top: 0;
165 | }
166 | }
167 | }
168 |
169 | .query,
170 | >.searchList {
171 | margin-top: 24px;
172 | margin-bottom: 24px;
173 | }
174 |
175 | .subTitle {
176 | color: var(--100-text-5, #121316);
177 | font-size: 14px;
178 | font-weight: 600;
179 | line-height: 24px;
180 | margin-bottom: 12px;
181 |
182 | span {
183 | margin-right: 4px;
184 | }
185 | }
186 |
187 | .searchList {
188 | margin-top: 0 !important;
189 | border-radius: 16px;
190 | background: var(--fill-2, #f4f5f9);
191 | padding: 8px;
192 | }
193 |
194 | .searchList {
195 | .thought {
196 | color: rgba(18, 19, 22, 0.8);
197 | font-size: 14px;
198 | line-height: 24px;
199 | margin-bottom: 16px;
200 | }
201 |
202 | .scrollCon {
203 | padding-right: 6px;
204 | height: auto;
205 | max-height: 542px;
206 | overflow-y: auto;
207 | position: relative;
208 | }
209 |
210 | .scrollCon::-webkit-scrollbar {
211 | width: 6px;
212 | }
213 |
214 | .scrollCon::-webkit-scrollbar-track {
215 | background-color: rgba(255, 255, 255, 0);
216 | border-radius: 100px;
217 | }
218 |
219 | .scrollCon::-webkit-scrollbar-thumb {
220 | background-color: #ebecf0;
221 | border-radius: 20px;
222 | }
223 |
224 | .inner {
225 | width: 100%;
226 | border-radius: 8px;
227 | transition: all 0.5s ease;
228 | box-sizing: border-box;
229 | }
230 | }
231 | }
--------------------------------------------------------------------------------
/frontend/React/src/pages/mindsearch/components/custom-markdown/index.module.less:
--------------------------------------------------------------------------------
1 | .markdownCon {
2 | display: flex;
3 | flex-direction: column;
4 | justify-content: flex-start;
5 |
6 | h1 {
7 | font-size: 26px;
8 | }
9 |
10 | h2 {
11 | font-size: 24px;
12 | }
13 |
14 | h3 {
15 | font-size: 20px;
16 | }
17 |
18 | h4 {
19 | font-size: 18px;
20 | }
21 |
22 | h5,
23 | h6 {
24 | font-size: 16px;
25 | }
26 |
27 | p {
28 | color: rgba(18, 19, 22, 0.8);
29 | font-size: 16px;
30 | font-weight: 400;
31 | line-height: 28px;
32 | margin: 0 0 16px 0;
33 | }
34 |
35 | ul {
36 | margin-bottom: 8px;
37 | padding-left: 22px;
38 | }
39 |
40 | li {
41 | color: rgba(18, 19, 22, 0.8);
42 | font-size: 16px;
43 | font-weight: 400;
44 | line-height: 28px;
45 |
46 | p {
47 | margin-bottom: 4px;
48 | }
49 | }
50 |
51 | >p:last-child {
52 | margin-bottom: 0;
53 | }
54 | }
55 |
56 | .footerFlag {
57 | width: 18px;
58 | height: 18px;
59 | display: inline-flex;
60 | justify-content: center;
61 | align-items: center;
62 | border-radius: 4px;
63 | background: var(--fill-2, #f4f5f9);
64 | color: var(--35-text-2, rgba(18, 19, 22, 0.35));
65 | font-size: 12px;
66 | font-weight: 600;
67 | margin-left: 2px;
68 | cursor: pointer;
69 | font-style: normal;
70 |
71 | /* 150% */
72 | &:hover {
73 | background: var(---Brand1-5, #3477eb);
74 | color: #fff;
75 |
76 | svg path {
77 | fill: #fff;
78 | fill-opacity: 1;
79 | }
80 | }
81 | }
82 |
83 | // .mergeQuoLi {
84 | // margin-bottom: 12px;
85 | // }
86 |
87 | li {
88 | cursor: pointer;
89 |
90 | .url {
91 | color: var(--60-text-3, rgba(18, 19, 22, 0.6));
92 | font-size: 12px;
93 | font-weight: 400;
94 | line-height: 18px;
95 | max-width: 100%;
96 | height: 18px;
97 | overflow: hidden;
98 | text-overflow: ellipsis;
99 | }
100 |
101 | .title {
102 | color: var(---Brand1-5, #3477eb);
103 | font-size: 14px;
104 | line-height: 21px;
105 | }
106 |
107 | .summ {
108 |     white-space: normal;
109 | display: -webkit-box;
110 | -webkit-box-orient: vertical;
111 | -webkit-line-clamp: 2;
112 | overflow: hidden;
113 | text-overflow: ellipsis;
114 | }
115 | }
116 |
117 | .line {
118 | margin: 4px 8px;
119 | border: 1px solid #ebecf0;
120 | transform: scaleY(0.5);
121 | }
122 |
123 | :global {
124 | .iQuoPopover {
125 | max-width: 420px;
126 | }
127 |
128 | .mergeQuoPopover {
129 | border-radius: 12px;
130 | border: 1px solid var(----line-2, #ebecf0);
131 | background: var(---fill-0, #fff);
132 | box-shadow: 1px 3px 8px 0px rgba(0, 0, 0, 0.06);
133 | max-height: 240px;
134 | max-width: 420px;
135 | overflow-y: auto;
136 |
137 | .ant-popover-inner {
138 | padding: 8px !important;
139 | }
140 |
141 | .ant-popover-inner-content .line:last-child {
142 | display: none;
143 | }
144 |
145 | li {
146 | border-radius: 8px;
147 | padding: 8px;
148 |
149 | &:hover {
150 | background-color: #f4f5f9;
151 | }
152 | }
153 | }
154 |
155 | .mergeQuoPopover::-webkit-scrollbar {
156 | width: 6px;
157 | }
158 |
159 | .mergeQuoPopover::-webkit-scrollbar-track {
160 | background-color: rgba(255, 255, 255, 0);
161 | border-radius: 100px;
162 | }
163 |
164 | .mergeQuoPopover::-webkit-scrollbar-thumb {
165 | background-color: #ebecf0;
166 | border-radius: 20px;
167 | }
168 | }
--------------------------------------------------------------------------------
/frontend/React/src/pages/mindsearch/components/custom-markdown/index.tsx:
--------------------------------------------------------------------------------
1 | import ReactMarkdown from 'react-markdown';
2 | import rehypeRaw from 'rehype-raw';
3 | import { replaceStr, mergeReplaceToDiv } from '../../utils/tools';
4 | import { Popover } from 'antd';
5 | import classNames from 'classnames';
6 | import styles from './index.module.less';
7 | import { useEffect } from 'react';
8 |
9 | interface IMarkdownProps {
10 | source: string;
11 | refList?: any;
12 | quoType?: string;
13 | chatIsOver?: boolean;
14 | }
15 |
16 | const CustomMarkdown = ({ source, refList = null, quoType = 'single', chatIsOver = false }: IMarkdownProps) => {
17 | const linkToExtend = (url: string) => {
18 | window.open(url);
19 | };
20 |
21 | const CustomI = ({ children, className, ...props }: any) => {
22 | const content = refList
23 | ? Object.keys(refList).map((item) => {
24 | if (Number(item) === Number(children)) {
25 | return (
26 | {
29 | linkToExtend(refList[item].url);
30 | }}
31 | >
32 | {refList[item].url}
33 | {refList[item].title}
34 | {refList[item].summ}
35 |
36 | );
37 | } else {
38 | return null;
39 | }
40 | })
41 | : null;
42 | return className.includes('custom') ? (
43 |
44 | {children}
45 |
46 | ) : (
47 | {children}
48 | );
49 | };
50 |
51 | const CustomDiv = ({ children, className, ...props }: any) => {
52 | const list = props['data-ids'].split(',');
53 | const content = refList
54 | ? Object.keys(refList).map((item) => {
55 | if (list.includes(String(item))) {
56 | return (
57 | <>
58 | {
62 | linkToExtend(refList[item].url);
63 | }}
64 | >
65 | {refList[item].url}
66 | {refList[item].title}
67 |
68 |
69 | >
70 | );
71 | } else {
72 | return null;
73 | }
74 | })
75 | : null;
76 | return className.includes('mergeQuo') ? (
77 |
78 |
79 |
80 |
87 |
94 |
95 |
96 |
97 | ) : (
98 | {children}
99 | );
100 | };
101 |
102 | return (
103 |
104 |
105 | {
106 | refList ?
107 | quoType === 'merge' ? mergeReplaceToDiv(source) :
108 | replaceStr(source) :
109 | source
110 | }
111 |
112 |
113 | );
114 | };
115 |
116 | export default CustomMarkdown;
117 |
--------------------------------------------------------------------------------
/frontend/React/src/pages/mindsearch/components/iconfont/index.tsx:
--------------------------------------------------------------------------------
1 | import { createFromIconfontCN } from '@ant-design/icons';
2 |
3 | // //at.alicdn.com/t/c/font_3858115_yl9vl04f0jc.js
4 | const IconFont = createFromIconfontCN({
5 | // scriptUrl: "//static.openxlab.org.cn/cmg-animation-upload/iconfont.js",
6 | scriptUrl: '//at.alicdn.com/t/c/font_3858115_p8dw9q83s0h.js',
7 | });
8 |
9 | export default IconFont;
10 |
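// A minimal usage sketch, assuming an icon id that exists in the iconfont
// script configured above ("icon-search" is only an illustrative placeholder,
// not a verified id from this project's icon set):
export const ExampleSearchIcon = () => <IconFont type="icon-search" style={{ fontSize: 18 }} />;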
--------------------------------------------------------------------------------
/frontend/React/src/pages/mindsearch/components/loading/index.module.less:
--------------------------------------------------------------------------------
1 | .loading99 {
2 | margin: 20px;
3 | position: relative;
4 | width: 1px;
5 | height: 1px;
6 | }
7 |
8 | .loading99:before,
9 | .loading99:after {
10 | position: absolute;
11 | display: inline-block;
12 | width: 15px;
13 | height: 15px;
14 | content: '';
15 | border-radius: 100%;
16 | background-color: #5551ff;
17 | }
18 |
19 | .loading99:before {
20 | left: -15px;
21 | animation: ball-pulse infinite 0.75s -0.4s cubic-bezier(0.2, 0.68, 0.18, 1.08);
22 | }
23 |
24 | .loading99:after {
25 | right: -15px;
26 | animation: ball-pulse infinite 0.75s cubic-bezier(0.2, 0.68, 0.18, 1.08);
27 | }
28 |
29 | @keyframes ball-pulse {
30 | 0% {
31 | transform: scale(1);
32 | opacity: 1;
33 | }
34 |
35 | 50% {
36 | transform: scale(0.1);
37 | opacity: 0.6;
38 | }
39 |
40 | 100% {
41 | transform: scale(1);
42 | opacity: 1;
43 | }
44 | }
45 |
--------------------------------------------------------------------------------
/frontend/React/src/pages/mindsearch/components/loading/index.tsx:
--------------------------------------------------------------------------------
1 | import styles from './index.module.less';
2 |
3 | const Loading = () => {
4 |   return <div className={styles.loading99} />;
5 | };
6 | export default Loading;
7 |
--------------------------------------------------------------------------------
/frontend/React/src/pages/mindsearch/components/mind-map-item/index.module.less:
--------------------------------------------------------------------------------
1 | article {
2 | padding: 6px 16px;
3 | border-radius: 8px;
4 | height: 38px;
5 | border: 1px solid transparent;
6 | background: #fff;
7 | color: #121316;
8 | text-align: center;
9 | font-size: 14px;
10 | line-height: 24px;
11 | position: relative;
12 | box-sizing: border-box;
13 |
14 | &.loading {
15 | line-height: 20px;
16 | border-radius: 8px;
17 | overflow: hidden;
18 | border: 1px solid transparent;
19 | padding: 4px;
20 |
21 | span {
22 | color: #3477eb;
23 | background-color: #fff;
24 | border-radius: 4px;
25 | line-height: 24px;
26 | padding: 2px 12px;
27 | }
28 |
29 | .looping {
30 | --border-width: 4px;
31 | --follow-panel-linear-border: linear-gradient(90deg, #3477eb 0.58%, #FFB4BA 100.36%);
32 |
33 | position: absolute;
34 | top: 0;
35 | left: 0;
36 | width: calc(100% + var(--border-width) * 3 - 8px);
37 | height: 100%;
38 | background: var(--follow-panel-linear-border);
39 | background-size: 300% 300%;
40 | background-position: 0 50%;
41 | animation: moveGradient 4s linear infinite;
42 | }
43 | }
44 |
45 | &.disabled {
46 | border-radius: 8px;
47 | border: 1px solid #d7d8dd;
48 | color: rgba(18, 19, 22, 0.35);
49 | }
50 |
51 | &.finished {
52 | cursor: pointer;
53 | border: 1px solid #3477EB;
54 |
55 | &:hover {
56 | background-color: #E6F2FF;
57 | }
58 |
59 | .finishDot {
60 | position: absolute;
61 | top: 6px;
62 | right: 6px;
63 | width: 6px;
64 | height: 6px;
65 | background-color: #3477EB;
66 | border-radius: 50%;
67 | }
68 | }
69 |
70 | &.forbidden {
71 | cursor: not-allowed;
72 | }
73 |
74 | &.emptyNode {
75 | padding: 0 !important;
76 | border: 0;
77 | }
78 |
79 | &.active {
80 | border-radius: 8px;
81 | border: 1px solid var(---Brand1-5, #3477EB);
82 | background: var(---Brand1-5, #3477EB);
83 | color: #fff;
84 |
85 | &:hover {
86 | border: 1px solid var(---Brand1-5, #3477EB);
87 | background: var(---Brand1-5, #3477EB);
88 | color: #fff;
89 | }
90 |
91 | .dot {
92 | position: absolute;
93 | top: 6px;
94 | right: 6px;
95 | width: 6px;
96 | height: 6px;
97 | background-color: #fff;
98 | border-radius: 50%;
99 | }
100 | }
101 |
102 | &.init {
103 | border: 1px solid transparent;
104 | cursor: auto;
105 | }
106 |
107 | span {
108 | display: block;
109 | white-space: nowrap;
110 | max-width: 160px;
111 | overflow: hidden;
112 | text-overflow: ellipsis;
113 | position: relative;
114 | z-index: 20;
115 | }
116 |
117 | span.status {
118 | color: #4082fe;
119 | }
120 | }
121 |
122 | ul.onlyone {
123 | &:before {
124 | opacity: 0;
125 | }
126 |
127 | >li {
128 | margin-left: 0px;
129 | }
130 |
131 | &>li:after {
132 | opacity: 0;
133 | }
134 |
135 | &>li:before {
136 | // left: 0;
137 | }
138 | }
139 |
140 | .endLine {
141 | border-bottom: 1px solid #d7d8dd;
142 | width: 3000px;
143 | transition: width 1s ease-in-out;
144 | }
145 |
146 | @keyframes moveGradient {
147 | 50% {
148 | background-position: 100% 50%;
149 | }
150 | }
--------------------------------------------------------------------------------
/frontend/React/src/pages/mindsearch/components/mind-map-item/index.tsx:
--------------------------------------------------------------------------------
1 | import styles from './index.module.less';
2 | import classNames from 'classnames';
3 | import { useEffect, useContext } from 'react';
4 | import { MindsearchContext } from '../../provider/context';
5 |
6 | // Recursive component that renders one node of the mind map
7 | const MindMapItem = ({ item, isEnd, selectNode }: any) => {
8 | const { activeNode, chatIsOver } = useContext(MindsearchContext);
9 | // console.log('[ms item------]', item, isEnd, currentNodeName)
10 |   // Recursively render child nodes
11 | const renderChildren = () => {
12 | if (item?.children?.length > 0) {
13 | return (
14 |
15 | {item.children.map((child: any, idx: number) => (
16 |
17 | ))}
18 |
19 | );
20 | }
21 | return null;
22 | };
23 |
24 | const handleClick = () => {
25 | if (item?.state === 0 || (item?.name === '原始问题' || item?.name === '最终回复')) {
26 | return;
27 | }
28 | selectNode(item?.name);
29 | };
30 |
31 | return (
32 |
33 |
43 | {item?.name}
44 | {item?.state === 1 &&
}
45 | {item?.id !== 0 &&
}
46 | {item?.name && item?.name === activeNode ?
: ''}
47 |
48 | {item?.children?.length > 0 && renderChildren()}
49 | {isEnd && (item?.children?.length === 0 || !item?.children) &&
}
50 |
51 | );
52 | };
53 |
54 | export default MindMapItem;
55 |
--------------------------------------------------------------------------------
/frontend/React/src/pages/mindsearch/components/mind-map/index.module.less:
--------------------------------------------------------------------------------
1 | .mapArea {
2 | width: 100%;
3 | overflow-x: auto;
4 | overflow-y: hidden;
5 | // transition: all 0.2s linear;
6 |
7 | &::-webkit-scrollbar {
8 | height: 6px;
9 | }
10 |
11 | &::-webkit-scrollbar-track {
12 | background-color: rgba(255, 255, 255, 0);
13 | border-radius: 10px;
14 | }
15 |
16 | &::-webkit-scrollbar-thumb {
17 | background-color: #d7d8dd;
18 | border-radius: 100px;
19 | }
20 | }
21 |
22 | .end {
23 | position: absolute;
24 | right: 0;
25 | background-color: #fff;
26 | display: flex;
27 | justify-content: center;
28 | align-items: center;
29 | border-left: 1px solid #d7d8dd;
30 | padding-left: 16px;
31 |
32 | .node {
33 | position: relative;
34 |
35 | &::before {
36 | content: '';
37 | border: 1px solid #d7d8dd;
38 | border-top: none;
39 | border-left: none;
40 | width: 14px;
41 | height: 0px;
42 | position: absolute;
43 | left: -16px;
44 | top: 50%;
45 | // transform: translateY(-50%);
46 | }
47 |
48 | article {
49 | padding: 8px 16px;
50 | border-radius: 8px;
51 | border: 1px solid transparent;
52 | color: #3477eb;
53 | text-align: center;
54 | font-size: 14px;
55 | line-height: 24px;
56 | box-sizing: border-box;
57 | background: #e6f2ff;
58 | }
59 | }
60 | }
61 |
62 | .mindmap {
63 | position: relative;
64 | margin-right: 16px;
65 |
66 | article {
67 | padding: 6px 16px;
68 | border-radius: 8px;
69 | height: 38px;
70 | border: 1px solid transparent;
71 | background: #fff;
72 | color: #121316;
73 | text-align: center;
74 | font-size: 14px;
75 | line-height: 24px;
76 | position: relative;
77 | box-sizing: border-box;
78 |
79 | &.loading {
80 | line-height: 20px;
81 | border-radius: 8px;
82 | overflow: hidden;
83 | border: 1px solid transparent;
84 | padding: 4px;
85 |
86 | span {
87 | color: #2126c0;
88 | background-color: #fff;
89 | border-radius: 4px;
90 | line-height: 24px;
91 | padding: 2px 12px;
92 | }
93 |
94 | .looping {
95 | --border-width: 4px;
96 | --follow-panel-linear-border: linear-gradient(91deg, #5551ff 0.58%, #ff87de 100.36%);
97 |
98 | position: absolute;
99 | top: 0;
100 | left: 0;
101 | width: calc(100% + var(--border-width) * 2 - 8px);
102 | height: calc(100%);
103 | background: var(--follow-panel-linear-border);
104 | background-size: 300% 300%;
105 | background-position: 0 50%;
106 | animation: moveGradient 4s alternate infinite;
107 | }
108 | }
109 |
110 | &.disabled {
111 | border-radius: 8px;
112 | border: 1px solid #d7d8dd;
113 | color: rgba(18, 19, 22, 0.35);
114 | }
115 |
116 | &.finished {
117 | cursor: pointer;
118 | border: 1px solid #2126c0;
119 |
120 | .finishDot {
121 | position: absolute;
122 | top: 6px;
123 | right: 6px;
124 | width: 6px;
125 | height: 6px;
126 | background-color: #c9c0fe;
127 | border-radius: 50%;
128 | }
129 | }
130 |
131 | &.init {
132 | border: 1px solid transparent;
133 | cursor: auto;
134 | }
135 |
136 | span {
137 | display: block;
138 | white-space: nowrap;
139 | max-width: 160px;
140 | overflow: hidden;
141 | text-overflow: ellipsis;
142 | position: relative;
143 | z-index: 20;
144 | }
145 |
146 | span.status {
147 | color: #4082fe;
148 | }
149 | }
150 |
151 |   // the first article is the start node
152 | >li {
153 | >article {
154 | border-radius: 8px;
155 | background: #e6f2ff;
156 | color: #3477eb;
157 | }
158 | }
159 |
160 | li {
161 | list-style: none;
162 | display: flex;
163 | align-items: center;
164 | box-sizing: border-box;
165 | margin: 16px;
166 | line-height: 1;
167 | position: relative;
168 |
169 | &>ul.onlyone {
170 | &:before {
171 | opacity: 0;
172 | }
173 |
174 | >li {
175 | margin-left: 0px;
176 | }
177 |
178 | &>li:after {
179 | opacity: 0;
180 | }
181 |
182 | &>li:before {
183 | // left: 0;
184 | }
185 | }
186 |
187 | &>ul:before {
188 | content: '';
189 | border: 1px solid #d7d8dd;
190 | border-top: none;
191 | border-left: none;
192 | width: calc(16px - 2px);
193 | height: 0px;
194 | position: absolute;
195 | left: 0;
196 | top: 50%;
197 | // transform: translateY(-50%);
198 | }
199 |
200 | &:before {
201 | content: '';
202 | border: 1px solid #d7d8dd;
203 | border-top: none;
204 | border-left: none;
205 | width: 16px;
206 | height: 0px;
207 | position: absolute;
208 | left: calc(-16px - 1px);
209 | }
210 |
211 | &:after {
212 | content: '';
213 | border: 1px solid #d7d8dd;
214 | border-top: none;
215 | border-left: none;
216 | width: 0px;
217 | height: calc(100% / 2 + 33px);
218 | position: absolute;
219 | left: calc(-16px - 2px);
220 | }
221 |
222 | &:first-of-type:after {
223 | top: 50%;
224 | }
225 |
226 | &:last-of-type:after {
227 | bottom: 50%;
228 | }
229 |
230 | ul {
231 | padding: 0 0 0 16px;
232 | position: relative;
233 | margin-bottom: 0;
234 | }
235 | }
236 |
237 | &>li {
238 |
239 | &:after,
240 | &:before {
241 | display: none;
242 | }
243 | }
244 |
245 | .endLine {
246 | border-bottom: 1px solid #d7d8dd;
247 | width: 3000px;
248 | transition: width 1s ease-in-out;
249 | }
250 | }
251 |
252 | @keyframes moveGradient {
253 | 50% {
254 | background-position: 100% 50%;
255 | }
256 | }
--------------------------------------------------------------------------------
/frontend/React/src/pages/mindsearch/components/mind-map/index.tsx:
--------------------------------------------------------------------------------
1 | import styles from './index.module.less';
2 | import { useState, useEffect } from 'react';
3 | import MindMapItem from '../mind-map-item';
4 |
5 | interface IEndStyle {
6 | top: string;
7 | height: number;
8 | }
9 | const MindMapGraph = (props: any) => {
10 | const { isEnd, renderData, handleNodeClick } = props;
11 | const [mapId] = useState(Date.now());
12 | const [showEndNode, setShowEndNode] = useState(false);
13 | const [mapWidth, setMapWidth] = useState(0);
14 | const [endStyle, setEndStyle] = useState({
15 | top: '50%',
16 | height: 0,
17 | });
18 | const generateWidth = () => {
19 | const articles = document.getElementById(`mindMap-${mapId}`)?.querySelectorAll('article');
20 |     // make sure there are article elements to measure
21 | if (articles?.length) {
22 | let maxRight = 0;
23 | articles.forEach((item, index) => {
24 | if (item.getBoundingClientRect().right > maxRight) {
25 | maxRight = item.getBoundingClientRect().right;
26 | }
27 | });
28 | const firstArticle = articles[0].getBoundingClientRect();
29 | if (maxRight - firstArticle.left + 200 > mapWidth) {
30 | return maxRight - firstArticle.left + 200;
31 | } else {
32 | return mapWidth;
33 | }
34 | } else {
35 | return 100;
36 | }
37 | };
38 |
39 | const generateEndStyle = () => {
40 |     // get all div elements whose class is "endline"
41 | const mindMap = document.getElementById(`mindMap-${mapId}`);
42 | const endlineDivs = document.getElementById(`mindMap-${mapId}`)?.querySelectorAll('.endline') || [];
43 | // console.log('generateEndStyle-----', mapId, `mindMap-${mapId}`, document.getElementById(`mindMap-${mapId}`), mindMap, endlineDivs, endlineDivs?.length);
44 |     // make sure there are at least two elements
45 | if (endlineDivs?.length >= 2 && mindMap) {
46 |       // get the bounding rectangles of the first and last elements
47 | const firstRect = endlineDivs?.[0].getBoundingClientRect();
48 | const lastRect = endlineDivs?.[endlineDivs.length - 1].getBoundingClientRect();
49 | const mindMapRect = mindMap?.getBoundingClientRect();
50 |       // compute the vertical (y) offset between them
51 | const yDiff = lastRect.top - firstRect.top;
52 | // const top = firstRect.top - mindMapRect.top;
53 |       // to include the full element height (not just the top position), add the element height as well
54 | // const yDiffWithHeight = yDiff + (lastRect.height - firstRect.height);
55 | return {
56 | top: firstRect.top - mindMapRect.top,
57 | height: yDiff + 1,
58 | };
59 | } else {
60 | return {
61 | top: '50%',
62 | height: 0,
63 | };
64 | }
65 | };
66 |
67 | useEffect(() => {
68 | setMapWidth(generateWidth());
69 | }, [renderData.length]);
70 |
71 | useEffect(() => {
72 | if (!isEnd) return;
73 | setMapWidth(generateWidth());
74 | setTimeout(() => {
75 | setEndStyle(generateEndStyle() as IEndStyle);
76 | setShowEndNode(true);
77 | }, 200);
78 | }, [isEnd]);
79 |
80 | return (
81 |
82 |
83 | {renderData.map((item: any) => (
84 |
90 | ))}
91 | {showEndNode && (
92 |
97 | )}
98 |
99 |
100 | );
101 | };
102 |
103 | export default MindMapGraph;
104 |
--------------------------------------------------------------------------------
/frontend/React/src/pages/mindsearch/components/notice/index.module.less:
--------------------------------------------------------------------------------
1 | .notice {
2 | color: #12131659;
3 | padding-top: 8px;
4 | text-align: center;
5 | font-weight: 400;
6 | font-size: 12px;
7 | line-height: 18px;
8 |
9 | a {
10 | text-decoration: none;
11 | color: #444;
12 | display: inline-flex;
13 | align-items: center;
14 |
15 | span {
16 | font-size: 18px;
17 | }
18 | }
19 | }
--------------------------------------------------------------------------------
/frontend/React/src/pages/mindsearch/components/notice/index.tsx:
--------------------------------------------------------------------------------
1 | import styles from './index.module.less';
2 | import IconFont from '../iconfont';
3 |
4 | const Notice = () => {
5 |   return <>
6 |     <div className={styles.notice}>
7 |       Powered by InternLM2.5, this service has been specifically optimized for Chinese. For a better experience in English, you can build it locally.
8 |     </div></>;
9 | };
10 | export default Notice;
--------------------------------------------------------------------------------
/frontend/React/src/pages/mindsearch/components/session-item/index.module.less:
--------------------------------------------------------------------------------
1 | .questionArea {
2 | display: flex;
3 | justify-content: flex-start;
4 | margin-bottom: 40px;
5 |
6 | .avatar {
7 | width: 32px;
8 | height: 32px;
9 | margin-right: 16px;
10 | border-radius: 50%;
11 | background-color: #ddd;
12 | flex-shrink: 0;
13 | margin-top: 7px;
14 | overflow: hidden;
15 |
16 | img {
17 | width: 100%;
18 | }
19 | }
20 |
21 | .question {
22 | padding: 12px 16px;
23 | background: #E6F2FF;
24 | max-width: 93.75%;
25 | border-radius: 16px;
26 |
27 | span {
28 | color: #121316cc;
29 | font-size: 16px;
30 | line-height: 28px;
31 | }
32 | }
33 | }
--------------------------------------------------------------------------------
/frontend/React/src/pages/mindsearch/components/session-item/index.tsx:
--------------------------------------------------------------------------------
1 | import { useEffect, useState, useRef, useMemo, useContext } from 'react';
2 | import styles from './index.module.less';
3 | import Answer from '../answer';
4 |
5 | const SessionItem = ({ item, handleNodeClick, idx }: any) => {
6 | return <>
7 |
8 |
9 |
10 | {item.question}
11 |
12 |
13 |
23 | >
24 | };
25 |
26 | export default SessionItem;
--------------------------------------------------------------------------------
/frontend/React/src/pages/mindsearch/provider/context.tsx:
--------------------------------------------------------------------------------
1 | import React, { createContext, useCallback, useContext, useMemo, useRef, useState } from 'react';
2 |
3 | export const MindsearchContext = createContext(
4 | {} as {
5 | activeNode: string;
6 | isEnd: boolean;
7 | chatIsOver: boolean;
8 | setActiveNode?: () => void;
9 | },
10 | );
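// A minimal provider sketch (an assumption, not part of the original page code):
// consumers such as MindMapItem read `activeNode` and `chatIsOver` from this
// context, so a parent component is expected to supply them roughly like this
// ('root' is only an illustrative value):
export const ExampleMindsearchProvider = ({ children }: { children: React.ReactNode }) => {
  const [activeNode] = useState('root');
  return (
    <MindsearchContext.Provider value={{ activeNode, isEnd: false, chatIsOver: false }}>
      {children}
    </MindsearchContext.Provider>
  );
};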
--------------------------------------------------------------------------------
/frontend/React/src/pages/mindsearch/utils/tools.ts:
--------------------------------------------------------------------------------
1 | export const getQueryString = (search: string, name: string) => {
2 | if (!search) return '';
3 | const reg = new RegExp(`(^|&)${name}=([^&]*)(&|$)`);
4 | const result = search.substring(1).match(reg);
5 | if (result != null) return result[2];
6 | return '';
7 | };
8 |
9 | export const isInWhiteList = (url = '', list: string[] = []) => {
10 | const baseUrl = url.split('?')[0];
11 | for (const whiteApi of list) {
12 | if (baseUrl.endsWith(whiteApi)) {
13 | return true;
14 | }
15 | }
16 | return false;
17 | };
18 |
19 | export const mergeReplaceToDiv = (str: string) => {
20 |   // regex that matches a run of consecutive [[number]] markers, using a non-capturing group for the repetition
21 | const regexOptimized = /\[\[(\d+)\]](?:\s*\[\[(\d+)\]])*/g;
22 |
23 |   // replacement callback that processes the whole matched run at once
24 | function replaceWithDivSpanOptimized(match: any) {
25 |     // extract all ids: since the match is a run of consecutive [[number]] markers, splitting the string is enough
26 | const numbers = match
27 | .slice(2, -2)
28 | .split(']][[')
29 | .map((num: any) => parseInt(num, 10));
30 | return `${numbers.map((num: number) => `${num} `).join('')} `;
31 | }
32 |
33 |   // apply the regex and replacement callback to the Markdown string
34 | const resultOptimized = str.replace(regexOptimized, replaceWithDivSpanOptimized);
35 |
36 | return resultOptimized;
37 | };
38 |
39 | export const replaceStr = (str: string) => {
40 | return str.replace(/\[\[(\d+)\]\]/g, (match: any, number: any) => {
41 |     // create a span element with class 'f', using the number as its text content
42 | return `${number} `;
43 | });
44 | };
45 |
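// Worked examples for the helpers above (values are illustrative):
//   getQueryString('?lang=en&debug=1', 'lang')      -> 'en'
//   isInWhiteList('/api/solve?cache=0', ['/solve']) -> true
// replaceStr and mergeReplaceToDiv rewrite inline citation markers such as
// "[[3]]" or "[[1]][[2]]". The exact markup their template strings emit was
// lost in this dump, but per the inline comments and the CustomMarkdown
// renderers, a single marker becomes a span-like citation element and a run of
// markers becomes a merged element carrying its ids in a `data-ids` attribute.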
--------------------------------------------------------------------------------
/frontend/React/src/routes/routes.tsx:
--------------------------------------------------------------------------------
1 | import MindSearchCon from '@/pages/mindsearch';
2 | import { ReactElement } from "react";
3 | import { Navigate, useRoutes } from "react-router-dom";
4 | interface RouteItem {
5 | path: string;
6 | needLogin?: boolean;
7 | element: ReactElement;
8 | }
9 |
10 | const routes: RouteItem[] = [
11 | {
12 | path: "/",
13 | needLogin: false,
14 |     element: <MindSearchCon />,
15 | },
16 | {
17 | path: "*",
18 |     element: <Navigate to="/" />,
19 | },
20 | ];
21 |
22 | const WrapperRoutes = () => {
23 | return useRoutes(
24 | routes.map((item: RouteItem) => {
25 | return item;
26 | }),
27 | );
28 | };
29 |
30 | export default WrapperRoutes;
31 |
--------------------------------------------------------------------------------
/frontend/React/src/styles/fn.less:
--------------------------------------------------------------------------------
1 | @import './var.less';
2 |
3 | // single-line ellipsis
4 | .singleLine() {
5 | overflow: hidden;
6 | text-overflow: ellipsis;
7 | white-space: nowrap;
8 | }
9 |
10 | // multi-line text ellipsis
11 | .ellispsis(@line) {
12 | overflow: hidden;
13 | text-overflow: ellipsis;
14 | display: -webkit-box;
15 | /* autoprefixer: off */
16 | -webkit-box-orient: vertical;
17 | /* autoprefixer: on */
18 | -webkit-line-clamp: @line;
19 | }
20 |
21 | // custom scrollbar
22 | .custom-scroll(@bg: transparent, @thumbBg: var(--grey-4)) {
23 | &::-webkit-scrollbar {
24 | width: 4px;
25 | background: @bg;
26 | height: 0px;
27 | }
28 | &::-webkit-scrollbar-thumb {
29 | border-radius: 20px;
30 | background: @bg;
31 | }
32 | &::-webkit-scrollbar-track {
33 | border-radius: 20px;
34 | background: @bg;
35 | opacity: 0;
36 | }
37 | &:hover::-webkit-scrollbar-thumb {
38 | background: @thumbBg;
39 | }
40 | }
41 |
42 | .common-button(@hoverBgColor:var( --blue-3),@bgColor:var(--theme-color),@radius:8px,@color:var(--white-95)) {
43 | border-radius: @radius;
44 | background-color: @bgColor;
45 | display: flex;
46 | align-items: center;
47 | justify-content: center;
48 | color: @color;
49 |
50 | &:hover {
51 | background-color: @hoverBgColor;
52 | cursor: pointer;
53 | }
54 | }
55 |
56 | .diyScroll(@bg: var(--white), @thumbBg: @thumbGrey,@width:@scrollWidth) {
57 | &::-webkit-scrollbar {
58 | width: @width;
59 | background: @bg;
60 | }
61 | &::-webkit-scrollbar-thumb {
62 | border-radius: 90px;
63 | background: @bg;
64 | }
65 | &::-webkit-scrollbar-track {
66 | -webkit-box-shadow: inset 0 0 4px @bg;
67 | border-radius: 90px;
68 | background: @bg;
69 | opacity: 0;
70 | }
71 | &:hover::-webkit-scrollbar-thumb {
72 | background: @thumbBg;
73 | }
74 | }
75 |
76 | .cursorBlink() {
77 | &::after {
78 | content: '';
79 | border: 1px solid var(--black);
80 | margin-left: 0px;
81 | flex: 1;
82 | -webkit-animation:
83 | typing 3s steps(16) forwards,
84 | cursor 1s infinite;
85 | }
86 | }
87 |
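// A minimal usage sketch of the mixins above (class names are illustrative):
.exampleCard {
  .singleLine();                              // single-line ellipsis
  .custom-scroll(transparent, var(--grey-4)); // thumb appears on hover
}
.exampleSummary {
  .ellispsis(2);                              // clamp to two lines (mixin name as defined above)
}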
--------------------------------------------------------------------------------
/frontend/React/src/styles/var.less:
--------------------------------------------------------------------------------
1 | body {
2 | // theme color ------start
3 | --theme-color: rgba(52, 119, 235, 1); //#3477EB
4 | --theme-color-9: rgba(52, 119, 235, 0.9);
5 | --theme-color-8: rgba(52, 119, 235, 0.8);
6 | --theme-color-7: rgba(52, 119, 235, 0.7);
7 | --theme-color-6: rgba(52, 119, 235, 0.6);
8 | --theme-color-5: rgba(52, 119, 235, 0.5);
9 | --theme-color-4: rgba(52, 119, 235, 0.4);
10 | --theme-color-3: rgba(52, 119, 235, 0.3);
11 | --theme-color-2: rgba(52, 119, 235, 0.2);
12 | --theme-color-1: rgba(52, 119, 235, 0.1);
13 | --theme-color-0: rgba(52, 119, 235, 0);
14 |
15 | --dark: rgba(18, 19, 22, 1);
16 | --dark-9: rgba(18, 19, 22, 0.9);
17 | --dark-8: rgba(18, 19, 22, 0.8);
18 | --dark-7: rgba(18, 19, 22, 0.7);
19 | --dark-6: rgba(18, 19, 22, 0.6);
20 | --dark-5: rgba(18, 19, 22, 0.5);
21 | --dark-4: rgba(18, 19, 22, 0.4);
22 | --dark-35: rgba(18, 19, 22, 0.35);
23 | --dark-3: rgba(18, 19, 22, 0.3);
24 | --dark-2: rgba(18, 19, 22, 0.2);
25 | --dark-1: rgba(18, 19, 22, 0.1);
26 |
27 | --black: rgba(0, 0, 0, 1);
28 | --black-9: rgba(0, 0, 0, 0.9);
29 | --black-8: rgba(0, 0, 0, 0.8);
30 | --black-7: rgba(0, 0, 0, 0.7);
31 | --black-6: rgba(0, 0, 0, 0.6);
32 | --black-5: rgba(0, 0, 0, 0.5);
33 | --black-4: rgba(0, 0, 0, 0.4);
34 | --black-3: rgba(0, 0, 0, 0.3);
35 | --black-2: rgba(0, 0, 0, 0.2);
36 | --black-1: rgba(0, 0, 0, 0.1);
37 | --black-06: rgba(0, 0, 0, 0.06);
38 |
39 | --white: rgba(255, 255, 255, 1);
40 | --white-95: rgba(255, 255, 255, 0.95);
41 | --white-9: rgba(255, 255, 255, 0.9);
42 | --white-8: rgba(255, 255, 255, 0.8);
43 | --white-7: rgba(255, 255, 255, 0.7);
44 | --white-6: rgba(255, 255, 255, 0.6);
45 | --white-5: rgba(255, 255, 255, 0.5);
46 | --white-4: rgba(255, 255, 255, 0.4);
47 | --white-3: rgba(255, 255, 255, 0.3);
48 | --white-2: rgba(255, 255, 255, 0.2);
49 | --white-1: rgba(255, 255, 255, 0.1);
50 |
51 | --blue-1: rgba(230, 242, 255, 1);
52 | --blue-2: rgba(179, 214, 255, 1);
53 | --blue-3: rgba(94, 155, 247, 1);
54 | --blue-4: rgba(13, 83, 222, 1);
55 | --blue-5: rgba(2, 56, 184, 1);
56 | --blue-6: rgba(52, 119, 235, 1);
57 |
58 | --grey-1: rgba(248, 249, 250, 1);
59 | --grey-2: rgba(249, 249, 249, 1);
60 | --grey-3: rgba(244, 245, 249, 1);
61 | --grey-4: rgba(235, 236, 240, 1);
62 | --grey-5: rgba(215, 216, 221, 1);
63 | --grey-6: rgba(180, 182, 188, 1);
64 | --grey-7: rgba(147, 150, 157, 1);
65 | --grey-8: rgba(70, 74, 83, 1);
66 | --grey-9: rgba(129, 135, 149, 1);
67 |
68 | --red-1: rgba(255, 117, 102, 1);
69 | --red-2: rgba(207, 45, 39, 1);
70 | --red-3: rgba(255, 136, 0, 1);
71 | --red-4: rgba(245, 72, 59, 1);
72 |
73 | --yellow-1: rgba(255, 239, 158, 1);
74 | // theme color ------end
75 |
76 | // size -------start
77 | --screen-min-width: 1200px;
78 | --screen-min-height: 540px;
79 | --sidebar-mini: 56px;
80 | --sidebar-normal: 152px;
81 | --header-height: 44px;
82 | // size -------end
83 | }
84 |
85 | @scrollWidth: 8px;
86 |
--------------------------------------------------------------------------------
/frontend/React/src/vite-env.d.ts:
--------------------------------------------------------------------------------
1 | /// <reference types="vite/client" />
2 |
3 | interface ImportMetaEnv {
4 | readonly VITE_SSO_URL: string;
5 | }
6 |
7 | interface ImportMeta {
8 | readonly env: ImportMetaEnv;
9 | }
10 |
--------------------------------------------------------------------------------
/frontend/React/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "target": "ES5",
4 | "useDefineForClassFields": true,
5 | "lib": ["DOM", "DOM.Iterable", "ESNext"],
6 | "allowJs": false,
7 | "skipLibCheck": true,
8 | "esModuleInterop": false,
9 | "allowSyntheticDefaultImports": true,
10 | "strict": true,
11 | "forceConsistentCasingInFileNames": true,
12 | "module": "ESNext",
13 | "moduleResolution": "Node",
14 | "resolveJsonModule": true,
15 | "isolatedModules": true,
16 | "noEmit": true,
17 | "jsx": "react-jsx",
18 | "baseUrl": "./",
19 | "paths": {
20 | "@/*": ["src/*"]
21 | }
22 | },
23 | "include": ["src"]
24 | }
25 |
--------------------------------------------------------------------------------
/frontend/React/vite.config.ts:
--------------------------------------------------------------------------------
1 | import { defineConfig } from "vite";
2 | import react from "@vitejs/plugin-react";
3 | import path from "path";
4 | import legacy from "@vitejs/plugin-legacy";
5 |
6 | // https://vitejs.dev/config/
7 | export default defineConfig({
8 | plugins: [
9 | react({
10 | babel: {
11 | plugins: [
12 | "@babel/plugin-proposal-optional-chaining", // 兼容老版本浏览器的语法解译
13 | ],
14 | },
15 | }),
16 | legacy({
17 |       targets: ["defaults", "ie >= 11", "chrome >= 52"], // list of browser targets to support; multiple entries are allowed
18 | additionalLegacyPolyfills: ["regenerator-runtime/runtime"],
19 | renderLegacyChunks: true,
20 | polyfills: [
21 | "es.symbol",
22 | "es.array.filter",
23 | "es.promise",
24 | "es.promise.finally",
25 | "es/map",
26 | "es/set",
27 | "es.array.for-each",
28 | "es.object.define-properties",
29 | "es.object.define-property",
30 | "es.object.get-own-property-descriptor",
31 | "es.object.get-own-property-descriptors",
32 | "es.object.keys",
33 | "es.object.to-string",
34 | "web.dom-collections.for-each",
35 | "esnext.global-this",
36 | "esnext.string.match-all",
37 | ],
38 | }),
39 | ],
40 | build: {
41 | target: "es5",
42 | },
43 | resolve: {
44 | alias: {
45 | "@": path.resolve(__dirname, "src"),
46 | },
47 | },
48 | css: {
49 | modules: {
50 | localsConvention: "camelCase",
51 | },
52 | },
53 | server: {
54 | port: 8080,
55 | proxy: {
56 | "/solve": {
57 | target: "",
58 | changeOrigin: true,
59 | },
60 | },
61 | },
62 | });
63 |
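// The "/solve" proxy target above is intentionally left empty; it is expected
// to point at the MindSearch FastAPI backend, which serves the /solve route.
// A hedged sketch of one way to supply it (the env-variable name and port
// below are illustrative, not project defaults):
//
//   proxy: {
//     "/solve": {
//       target: process.env.VITE_BACKEND_URL ?? "http://127.0.0.1:8002",
//       changeOrigin: true,
//     },
//   },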
--------------------------------------------------------------------------------
/frontend/css/gradio_front.css:
--------------------------------------------------------------------------------
1 | .gradio-container.gradio-container-4-44-0.svelte-wpkpf6.app {
2 |
3 |   /* background-image:url('https://images.unsplash.com/photo-1577563908411-5077b6dc7624?ixlib=rb-4.0.3&q=85&fm=jpg&crop=entropy&cs=srgb&dl=volodymyr-hryshchenko-V5vqWC9gyEU-unsplash.jpg&w=2400'); */
4 | background-image: url('https://github.com/InternLM/MindSearch/blob/main/frontend/React/src/assets/background.png?raw=true');
5 | background-size: cover;
6 |   /* the image covers the whole page */
7 | background-position: center;
8 | background-repeat: no-repeat;
9 | background-color: #f9fafb;
10 | }
11 |
12 | .logo {
13 | width: 150px;
14 | }
15 |
16 | .chatbot-container {
17 | height: auto;
18 | max-height: 600px;
19 | /* Maximum height for chatbots */
20 | overflow-y: auto;
21 | }
22 |
23 | /* Style the main chat bar container */
24 | .chat-box {
25 | background-color: #f9fafb;
26 | border-radius: 8px;
27 | padding: 10px;
28 | display: flex;
29 | align-items: center;
30 | }
31 |
32 | /* Style the text input area */
33 | .editor {
34 | border: none;
35 | background-color: transparent;
36 | padding: 5px;
37 | width: 100%;
38 | font-size: 16px;
39 | flex: 9;
40 | /* Allow the input to grow */
41 | }
42 |
43 |
44 |
45 | .toolbarButton {
46 | padding: 5px 10px;
47 | /* Reduce padding to make buttons smaller */
48 | font-size: 14px;
49 | /* Adjust font size if needed */
50 | margin-left: 10px;
51 | /* Add spacing between buttons */
52 | }
53 |
54 | .examples-container {
55 | margin-top: 2px;
56 | /* Reduced margin */
57 | }
58 |
59 | .flex-wrap.user.svelte-1e1jlin.svelte-1e1jlin.svelte-1e1jlin {
60 | background-color: #93c5fd;
61 | }
62 |
63 | #component-10 {
64 | color: #f9fafb;
65 | gap: 0px;
66 | }
67 |
68 | #component-16 {
69 | gap: 0px;
70 | }
71 |
72 | #component-9 {
73 | color: #f9fafb;
74 | }
75 |
76 | #component-1 {
77 | color: #f9fafb;
78 | }
79 |
80 | .gradio-app.gradio-container.gradio-container-4-44-0.contain.chat-box {
81 | background-color: #f9fafb;
82 | }
--------------------------------------------------------------------------------
/frontend/gradio_agentchatbot/__init__.py:
--------------------------------------------------------------------------------
1 | # This component is modified from gradio_agentchatbot:
2 | # - https://huggingface.co/spaces/freddyaboulton/gradio_agentchatbot/tree/main/src/backend/gradio_agentchatbot
3 |
4 | from .agentchatbot import AgentChatbot, ChatbotData
5 | from .utils import ChatFileMessage, ChatMessage, Message, ThoughtMetadata
6 |
7 | __all__ = [
8 | "AgentChatbot",
9 | "ChatbotData",
10 | "stream_from_transformers_agent",
11 | "ChatMessage",
12 | "ThoughtMetadata",
13 | "ChatFileMessage",
14 | "Message",
15 | ]
16 |
--------------------------------------------------------------------------------
/frontend/gradio_agentchatbot/templates/component/assets/worker-lPYB70QI.js:
--------------------------------------------------------------------------------
1 | (function(){"use strict";const R="https://unpkg.com/@ffmpeg/core@0.12.6/dist/umd/ffmpeg-core.js";var E;(function(t){t.LOAD="LOAD",t.EXEC="EXEC",t.WRITE_FILE="WRITE_FILE",t.READ_FILE="READ_FILE",t.DELETE_FILE="DELETE_FILE",t.RENAME="RENAME",t.CREATE_DIR="CREATE_DIR",t.LIST_DIR="LIST_DIR",t.DELETE_DIR="DELETE_DIR",t.ERROR="ERROR",t.DOWNLOAD="DOWNLOAD",t.PROGRESS="PROGRESS",t.LOG="LOG",t.MOUNT="MOUNT",t.UNMOUNT="UNMOUNT"})(E||(E={}));const a=new Error("unknown message type"),f=new Error("ffmpeg is not loaded, call `await ffmpeg.load()` first"),u=new Error("failed to import ffmpeg-core.js");let r;const O=async({coreURL:t,wasmURL:n,workerURL:e})=>{const o=!r;try{t||(t=R),importScripts(t)}catch{if(t||(t=R.replace("/umd/","/esm/")),self.createFFmpegCore=(await import(t)).default,!self.createFFmpegCore)throw u}const s=t,c=n||t.replace(/.js$/g,".wasm"),b=e||t.replace(/.js$/g,".worker.js");return r=await self.createFFmpegCore({mainScriptUrlOrBlob:`${s}#${btoa(JSON.stringify({wasmURL:c,workerURL:b}))}`}),r.setLogger(i=>self.postMessage({type:E.LOG,data:i})),r.setProgress(i=>self.postMessage({type:E.PROGRESS,data:i})),o},l=({args:t,timeout:n=-1})=>{r.setTimeout(n),r.exec(...t);const e=r.ret;return r.reset(),e},m=({path:t,data:n})=>(r.FS.writeFile(t,n),!0),D=({path:t,encoding:n})=>r.FS.readFile(t,{encoding:n}),S=({path:t})=>(r.FS.unlink(t),!0),I=({oldPath:t,newPath:n})=>(r.FS.rename(t,n),!0),L=({path:t})=>(r.FS.mkdir(t),!0),N=({path:t})=>{const n=r.FS.readdir(t),e=[];for(const o of n){const s=r.FS.stat(`${t}/${o}`),c=r.FS.isDir(s.mode);e.push({name:o,isDir:c})}return e},A=({path:t})=>(r.FS.rmdir(t),!0),w=({fsType:t,options:n,mountPoint:e})=>{const o=t,s=r.FS.filesystems[o];return s?(r.FS.mount(s,n,e),!0):!1},k=({mountPoint:t})=>(r.FS.unmount(t),!0);self.onmessage=async({data:{id:t,type:n,data:e}})=>{const o=[];let s;try{if(n!==E.LOAD&&!r)throw f;switch(n){case E.LOAD:s=await O(e);break;case E.EXEC:s=l(e);break;case E.WRITE_FILE:s=m(e);break;case E.READ_FILE:s=D(e);break;case E.DELETE_FILE:s=S(e);break;case E.RENAME:s=I(e);break;case E.CREATE_DIR:s=L(e);break;case E.LIST_DIR:s=N(e);break;case E.DELETE_DIR:s=A(e);break;case E.MOUNT:s=w(e);break;case E.UNMOUNT:s=k(e);break;default:throw a}}catch(c){self.postMessage({id:t,type:E.ERROR,data:c.toString()});return}s instanceof Uint8Array&&o.push(s.buffer),self.postMessage({id:t,type:n,data:s},o)}})();
2 |
--------------------------------------------------------------------------------
/frontend/gradio_agentchatbot/utils.py:
--------------------------------------------------------------------------------
1 | from typing import List, Literal, Optional, Union
2 |
3 | from gradio.data_classes import FileData, GradioModel, GradioRootModel
4 | from pydantic import Field
5 |
6 |
7 | class ThoughtMetadata(GradioModel):
8 | tool_name: Optional[str] = None
9 | error: bool = False
10 |
11 |
12 | class Message(GradioModel):
13 | role: Literal["user", "assistant"]
14 | thought_metadata: ThoughtMetadata = Field(default_factory=ThoughtMetadata)
15 |
16 |
17 | class ChatMessage(Message):
18 | content: str
19 |
20 |
21 | class ChatFileMessage(Message):
22 | file: FileData
23 | alt_text: Optional[str] = None
24 |
25 |
26 | class ChatbotData(GradioRootModel):
27 | root: List[Union[ChatMessage, ChatFileMessage]]
28 |
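# A minimal construction sketch for the models above (message contents are
# illustrative only):
if __name__ == "__main__":
    history = ChatbotData(
        root=[
            ChatMessage(role="user", content="What is MindSearch?"),
            ChatMessage(
                role="assistant",
                content="Let me search for that.",
                thought_metadata=ThoughtMetadata(tool_name="WebBrowser"),
            ),
        ]
    )
    print(history.model_dump())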
--------------------------------------------------------------------------------
/mindsearch/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/InternLM/MindSearch/bfb41be176f95700bcf199be69b001096425bbbd/mindsearch/__init__.py
--------------------------------------------------------------------------------
/mindsearch/agent/__init__.py:
--------------------------------------------------------------------------------
1 | import os
2 | from copy import deepcopy
3 | from datetime import datetime
4 |
5 | from lagent.actions import AsyncWebBrowser, WebBrowser
6 | from lagent.agents.stream import get_plugin_prompt
7 | from lagent.prompts import InterpreterParser, PluginParser
8 | from lagent.utils import create_object
9 |
10 | from . import models as llm_factory
11 | from .mindsearch_agent import AsyncMindSearchAgent, MindSearchAgent
12 | from .mindsearch_prompt import (
13 | FINAL_RESPONSE_CN,
14 | FINAL_RESPONSE_EN,
15 | GRAPH_PROMPT_CN,
16 | GRAPH_PROMPT_EN,
17 | searcher_context_template_cn,
18 | searcher_context_template_en,
19 | searcher_input_template_cn,
20 | searcher_input_template_en,
21 | searcher_system_prompt_cn,
22 | searcher_system_prompt_en,
23 | )
24 |
25 | LLM = {}
26 |
27 |
28 | def init_agent(lang="cn",
29 | model_format="internlm_server",
30 | search_engine="BingSearch",
31 | use_async=False):
32 | mode = "async" if use_async else "sync"
33 | llm = LLM.get(model_format, {}).get(mode)
34 | if llm is None:
35 | llm_cfg = deepcopy(getattr(llm_factory, model_format))
36 | if llm_cfg is None:
37 | raise NotImplementedError
38 | if use_async:
39 | cls_name = (
40 | llm_cfg["type"].split(".")[-1] if isinstance(
41 | llm_cfg["type"], str) else llm_cfg["type"].__name__)
42 | llm_cfg["type"] = f"lagent.llms.Async{cls_name}"
43 | llm = create_object(llm_cfg)
44 | LLM.setdefault(model_format, {}).setdefault(mode, llm)
45 |
46 | date = datetime.now().strftime("The current date is %Y-%m-%d.")
47 | plugins = [(dict(
48 | type=AsyncWebBrowser if use_async else WebBrowser,
49 | searcher_type=search_engine,
50 | topk=6,
51 | secret_id=os.getenv("TENCENT_SEARCH_SECRET_ID"),
52 | secret_key=os.getenv("TENCENT_SEARCH_SECRET_KEY"),
53 | ) if search_engine == "TencentSearch" else dict(
54 | type=AsyncWebBrowser if use_async else WebBrowser,
55 | searcher_type=search_engine,
56 | topk=6,
57 | api_key=os.getenv("WEB_SEARCH_API_KEY"),
58 | ))]
59 | agent = (AsyncMindSearchAgent if use_async else MindSearchAgent)(
60 | llm=llm,
61 | template=date,
62 | output_format=InterpreterParser(
63 | template=GRAPH_PROMPT_CN if lang == "cn" else GRAPH_PROMPT_EN),
64 | searcher_cfg=dict(
65 | llm=llm,
66 | plugins=plugins,
67 | template=date,
68 | output_format=PluginParser(
69 | template=searcher_system_prompt_cn
70 | if lang == "cn" else searcher_system_prompt_en,
71 | tool_info=get_plugin_prompt(plugins),
72 | ),
73 | user_input_template=(searcher_input_template_cn if lang == "cn"
74 | else searcher_input_template_en),
75 | user_context_template=(searcher_context_template_cn if lang == "cn"
76 | else searcher_context_template_en),
77 | ),
78 | summary_prompt=FINAL_RESPONSE_CN
79 | if lang == "cn" else FINAL_RESPONSE_EN,
80 | max_turn=10,
81 | )
82 | return agent
83 |
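# A minimal usage sketch (assumes the chosen model backend is reachable and the
# corresponding API keys are exported; the query is only an example, and
# mindsearch/terminal.py in this repo shows a fuller version of the same flow):
if __name__ == "__main__":
    agent = init_agent(lang="en",
                       model_format="internlm_silicon",
                       search_engine="DuckDuckGoSearch")
    for message in agent("What is the highest mountain on Earth?"):
        pass  # streamed AgentMessage objects; the last one holds the final answer
    print(message.content)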
--------------------------------------------------------------------------------
/mindsearch/agent/mindsearch_agent.py:
--------------------------------------------------------------------------------
1 | import json
2 | import logging
3 | import re
4 | from copy import deepcopy
5 | from typing import Dict, Tuple
6 |
7 | from lagent.schema import AgentMessage, AgentStatusCode, ModelStatusCode
8 | from lagent.utils import GeneratorWithReturn
9 |
10 | from .graph import ExecutionAction, WebSearchGraph
11 | from .streaming import AsyncStreamingAgentForInternLM, StreamingAgentForInternLM
12 |
13 |
14 | def _update_ref(ref: str, ref2url: Dict[int, str], ptr: int) -> Tuple[str, Dict[int, str], int]:
15 | numbers = list({int(n) for n in re.findall(r"\[\[(\d+)\]\]", ref)})
16 | numbers = {n: idx + 1 for idx, n in enumerate(numbers)}
17 | updated_ref = re.sub(
18 | r"\[\[(\d+)\]\]",
19 | lambda match: f"[[{numbers[int(match.group(1))] + ptr}]]",
20 | ref,
21 | )
22 | updated_ref2url = {}
23 | if numbers:
24 | try:
25 | assert all(elem in ref2url for elem in numbers)
26 | except Exception as exc:
27 | logging.info(f"Illegal reference id: {str(exc)}")
28 | if ref2url:
29 | updated_ref2url = {
30 | numbers[idx] + ptr: ref2url[idx] for idx in numbers if idx in ref2url
31 | }
32 | return updated_ref, updated_ref2url, len(numbers) + 1
33 |
34 |
35 | def _generate_references_from_graph(graph: Dict[str, dict]) -> Tuple[str, Dict[int, dict]]:
36 | ptr, references, references_url = 0, [], {}
37 | for name, data_item in graph.items():
38 | if name in ["root", "response"]:
39 | continue
40 | # only search once at each node, thus the result offset is 2
41 | assert data_item["memory"]["agent.memory"][2]["sender"].endswith("ActionExecutor")
42 | ref2url = {
43 | int(k): v
44 | for k, v in json.loads(data_item["memory"]["agent.memory"][2]["content"]).items()
45 | }
46 |         updated_ref, ref2url, added_ptr = _update_ref(
47 | data_item["response"]["content"], ref2url, ptr
48 | )
49 | ptr += added_ptr
50 |         references.append(f'## {data_item["content"]}\n\n{updated_ref}')
51 | references_url.update(ref2url)
52 | return "\n\n".join(references), references_url
53 |
54 |
55 | class MindSearchAgent(StreamingAgentForInternLM):
56 | def __init__(
57 | self,
58 | searcher_cfg: dict,
59 | summary_prompt: str,
60 | finish_condition=lambda m: "add_response_node" in m.content,
61 | max_turn: int = 10,
62 | **kwargs,
63 | ):
64 | WebSearchGraph.SEARCHER_CONFIG = searcher_cfg
65 | super().__init__(finish_condition=finish_condition, max_turn=max_turn, **kwargs)
66 | self.summary_prompt = summary_prompt
67 | self.action = ExecutionAction()
68 |
69 | def forward(self, message: AgentMessage, session_id=0, **kwargs):
70 | if isinstance(message, str):
71 | message = AgentMessage(sender="user", content=message)
72 | _graph_state = dict(node={}, adjacency_list={}, ref2url={})
73 | local_dict, global_dict = {}, globals()
74 | for _ in range(self.max_turn):
75 | last_agent_state = AgentStatusCode.SESSION_READY
76 | for message in self.agent(message, session_id=session_id, **kwargs):
77 | if isinstance(message.formatted, dict) and message.formatted.get("tool_type"):
78 | if message.stream_state == ModelStatusCode.END:
79 | message.stream_state = last_agent_state + int(
80 | last_agent_state
81 | in [
82 | AgentStatusCode.CODING,
83 | AgentStatusCode.PLUGIN_START,
84 | ]
85 | )
86 | else:
87 | message.stream_state = (
88 | AgentStatusCode.PLUGIN_START
89 | if message.formatted["tool_type"] == "plugin"
90 | else AgentStatusCode.CODING
91 | )
92 | else:
93 | message.stream_state = AgentStatusCode.STREAM_ING
94 | message.formatted.update(deepcopy(_graph_state))
95 | yield message
96 | last_agent_state = message.stream_state
97 | if not message.formatted["tool_type"]:
98 | message.stream_state = AgentStatusCode.END
99 | yield message
100 | return
101 |
102 | gen = GeneratorWithReturn(
103 | self.action.run(message.content, local_dict, global_dict, True)
104 | )
105 | for graph_exec in gen:
106 | graph_exec.formatted["ref2url"] = deepcopy(_graph_state["ref2url"])
107 | yield graph_exec
108 |
109 | reference, references_url = _generate_references_from_graph(gen.ret[1])
110 | _graph_state.update(node=gen.ret[1], adjacency_list=gen.ret[2], ref2url=references_url)
111 | if self.finish_condition(message):
112 | message = AgentMessage(
113 | sender="ActionExecutor",
114 | content=self.summary_prompt,
115 | formatted=deepcopy(_graph_state),
116 | stream_state=message.stream_state + 1, # plugin or code return
117 | )
118 | yield message
119 | # summarize the references to generate the final answer
120 | for message in self.agent(message, session_id=session_id, **kwargs):
121 | message.formatted.update(deepcopy(_graph_state))
122 | yield message
123 | return
124 | message = AgentMessage(
125 | sender="ActionExecutor",
126 | content=reference,
127 | formatted=deepcopy(_graph_state),
128 | stream_state=message.stream_state + 1, # plugin or code return
129 | )
130 | yield message
131 |
132 |
133 | class AsyncMindSearchAgent(AsyncStreamingAgentForInternLM):
134 | def __init__(
135 | self,
136 | searcher_cfg: dict,
137 | summary_prompt: str,
138 | finish_condition=lambda m: "add_response_node" in m.content,
139 | max_turn: int = 10,
140 | **kwargs,
141 | ):
142 | WebSearchGraph.SEARCHER_CONFIG = searcher_cfg
143 | WebSearchGraph.is_async = True
144 | WebSearchGraph.start_loop()
145 | super().__init__(finish_condition=finish_condition, max_turn=max_turn, **kwargs)
146 | self.summary_prompt = summary_prompt
147 | self.action = ExecutionAction()
148 |
149 | async def forward(self, message: AgentMessage, session_id=0, **kwargs):
150 | if isinstance(message, str):
151 | message = AgentMessage(sender="user", content=message)
152 | _graph_state = dict(node={}, adjacency_list={}, ref2url={})
153 | local_dict, global_dict = {}, globals()
154 | for _ in range(self.max_turn):
155 | last_agent_state = AgentStatusCode.SESSION_READY
156 | async for message in self.agent(message, session_id=session_id, **kwargs):
157 | if isinstance(message.formatted, dict) and message.formatted.get("tool_type"):
158 | if message.stream_state == ModelStatusCode.END:
159 | message.stream_state = last_agent_state + int(
160 | last_agent_state
161 | in [
162 | AgentStatusCode.CODING,
163 | AgentStatusCode.PLUGIN_START,
164 | ]
165 | )
166 | else:
167 | message.stream_state = (
168 | AgentStatusCode.PLUGIN_START
169 | if message.formatted["tool_type"] == "plugin"
170 | else AgentStatusCode.CODING
171 | )
172 | else:
173 | message.stream_state = AgentStatusCode.STREAM_ING
174 | message.formatted.update(deepcopy(_graph_state))
175 | yield message
176 | last_agent_state = message.stream_state
177 | if not message.formatted["tool_type"]:
178 | message.stream_state = AgentStatusCode.END
179 | yield message
180 | return
181 |
182 | gen = GeneratorWithReturn(
183 | self.action.run(message.content, local_dict, global_dict, True)
184 | )
185 | for graph_exec in gen:
186 | graph_exec.formatted["ref2url"] = deepcopy(_graph_state["ref2url"])
187 | yield graph_exec
188 |
189 | reference, references_url = _generate_references_from_graph(gen.ret[1])
190 | _graph_state.update(node=gen.ret[1], adjacency_list=gen.ret[2], ref2url=references_url)
191 | if self.finish_condition(message):
192 | message = AgentMessage(
193 | sender="ActionExecutor",
194 | content=self.summary_prompt,
195 | formatted=deepcopy(_graph_state),
196 | stream_state=message.stream_state + 1, # plugin or code return
197 | )
198 | yield message
199 | # summarize the references to generate the final answer
200 | async for message in self.agent(message, session_id=session_id, **kwargs):
201 | message.formatted.update(deepcopy(_graph_state))
202 | yield message
203 | return
204 | message = AgentMessage(
205 | sender="ActionExecutor",
206 | content=reference,
207 | formatted=deepcopy(_graph_state),
208 | stream_state=message.stream_state + 1, # plugin or code return
209 | )
210 | yield message
211 |
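# A worked sketch of the reference renumbering in `_update_ref` above (values
# are illustrative): _update_ref("see [[2]] and [[5]]", {2: "urlA", 5: "urlB"}, ptr=3)
# remaps the distinct ids {2, 5} to consecutive local indices, yielding roughly
# "see [[4]] and [[5]]" together with {4: "urlA", 5: "urlB"}; the exact
# assignment depends on set iteration order. The third return value,
# len(numbers) + 1 = 3, is added to `ptr` so the next node's citations continue
# numbering where this node left off.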
--------------------------------------------------------------------------------
/mindsearch/agent/models.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from dotenv import load_dotenv
4 | from lagent.llms import (
5 | GPTAPI,
6 | INTERNLM2_META,
7 | HFTransformerCasualLM,
8 | LMDeployClient,
9 | LMDeployServer,
10 | )
11 | load_dotenv()
12 | internlm_server = dict(
13 | type=LMDeployServer,
14 | path="internlm/internlm2_5-7b-chat",
15 | model_name="internlm2_5-7b-chat",
16 | meta_template=INTERNLM2_META,
17 | top_p=0.8,
18 | top_k=1,
19 | temperature=0,
20 | max_new_tokens=8192,
21 | repetition_penalty=1.02,
22 | stop_words=["<|im_end|>"],
23 | )
24 |
25 | internlm_client = dict(
26 | type=LMDeployClient,
27 | model_name="internlm2_5-7b-chat",
28 | url="http://127.0.0.1:23333",
29 | meta_template=INTERNLM2_META,
30 | top_p=0.8,
31 | top_k=1,
32 | temperature=0,
33 | max_new_tokens=8192,
34 | repetition_penalty=1.02,
35 | stop_words=["<|im_end|>"],
36 | )
37 |
38 | internlm_hf = dict(
39 | type=HFTransformerCasualLM,
40 | path="internlm/internlm2_5-7b-chat",
41 | meta_template=INTERNLM2_META,
42 | top_p=0.8,
43 | top_k=None,
44 | temperature=1e-6,
45 | max_new_tokens=8192,
46 | repetition_penalty=1.02,
47 | stop_words=["<|im_end|>"],
48 | )
49 | # OPENAI_API_BASE must be the complete chat completions URL, e.g.: https://api.openai.com/v1/chat/completions
50 | gpt4 = dict(
51 | type=GPTAPI,
52 | model_type="gpt-4-turbo",
53 | key=os.environ.get("OPENAI_API_KEY", "YOUR OPENAI API KEY"),
54 | api_base=os.environ.get("OPENAI_API_BASE",
55 | "https://api.openai.com/v1/chat/completions"),
56 | )
57 |
58 | url = "https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions"
59 | qwen = dict(
60 | type=GPTAPI,
61 | model_type="qwen-max-longcontext",
62 | key=os.environ.get("QWEN_API_KEY", "YOUR QWEN API KEY"),
63 | api_base=url,
64 | meta_template=[
65 | dict(role="system", api_role="system"),
66 | dict(role="user", api_role="user"),
67 | dict(role="assistant", api_role="assistant"),
68 | dict(role="environment", api_role="system"),
69 | ],
70 | top_p=0.8,
71 | top_k=1,
72 | temperature=0,
73 | max_new_tokens=4096,
74 | repetition_penalty=1.02,
75 | stop_words=["<|im_end|>"],
76 | )
77 |
78 | internlm_silicon = dict(
79 | type=GPTAPI,
80 | model_type="internlm/internlm2_5-7b-chat",
81 | key=os.environ.get("SILICON_API_KEY", "YOUR SILICON API KEY"),
82 | api_base="https://api.siliconflow.cn/v1/chat/completions",
83 | meta_template=[
84 | dict(role="system", api_role="system"),
85 | dict(role="user", api_role="user"),
86 | dict(role="assistant", api_role="assistant"),
87 | dict(role="environment", api_role="system"),
88 | ],
89 | top_p=0.8,
90 | top_k=1,
91 | temperature=0,
92 | max_new_tokens=8192,
93 | repetition_penalty=1.02,
94 | stop_words=["<|im_end|>"],
95 | )
96 |
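# The dicts above are plain configs; mindsearch/agent/__init__.py resolves one
# by name with `getattr(models, model_format)` and instantiates it. A minimal
# resolution sketch (the chosen name is only an example):
if __name__ == "__main__":
    from copy import deepcopy

    from lagent.utils import create_object

    cfg = deepcopy(internlm_silicon)  # any of the dicts defined above
    llm = create_object(cfg)          # builds the GPTAPI client with these kwargs
    print(type(llm).__name__)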
--------------------------------------------------------------------------------
/mindsearch/agent/streaming.py:
--------------------------------------------------------------------------------
1 | import copy
2 | from typing import List, Union
3 |
4 | from lagent.agents import Agent, AgentForInternLM, AsyncAgent, AsyncAgentForInternLM
5 | from lagent.schema import AgentMessage, AgentStatusCode, ModelStatusCode
6 |
7 |
8 | class StreamingAgentMixin:
9 | """Make agent calling output a streaming response."""
10 |
11 | def __call__(self, *message: Union[AgentMessage, List[AgentMessage]], session_id=0, **kwargs):
12 | for hook in self._hooks.values():
13 | message = copy.deepcopy(message)
14 | result = hook.before_agent(self, message, session_id)
15 | if result:
16 | message = result
17 | self.update_memory(message, session_id=session_id)
18 | response_message = AgentMessage(sender=self.name, content="")
19 | for response_message in self.forward(*message, session_id=session_id, **kwargs):
20 | if not isinstance(response_message, AgentMessage):
21 | model_state, response = response_message
22 | response_message = AgentMessage(
23 | sender=self.name,
24 | content=response,
25 | stream_state=model_state,
26 | )
27 | yield response_message.model_copy()
28 | self.update_memory(response_message, session_id=session_id)
29 | for hook in self._hooks.values():
30 | response_message = response_message.model_copy(deep=True)
31 | result = hook.after_agent(self, response_message, session_id)
32 | if result:
33 | response_message = result
34 | yield response_message
35 |
36 |
37 | class AsyncStreamingAgentMixin:
38 |     """Make the asynchronous agent's __call__ yield a streaming response."""
39 |
40 | async def __call__(
41 | self, *message: Union[AgentMessage, List[AgentMessage]], session_id=0, **kwargs
42 | ):
43 | for hook in self._hooks.values():
44 | message = copy.deepcopy(message)
45 | result = hook.before_agent(self, message, session_id)
46 | if result:
47 | message = result
48 | self.update_memory(message, session_id=session_id)
49 | response_message = AgentMessage(sender=self.name, content="")
50 | async for response_message in self.forward(*message, session_id=session_id, **kwargs):
51 | if not isinstance(response_message, AgentMessage):
52 | model_state, response = response_message
53 | response_message = AgentMessage(
54 | sender=self.name,
55 | content=response,
56 | stream_state=model_state,
57 | )
58 | yield response_message.model_copy()
59 | self.update_memory(response_message, session_id=session_id)
60 | for hook in self._hooks.values():
61 | response_message = response_message.model_copy(deep=True)
62 | result = hook.after_agent(self, response_message, session_id)
63 | if result:
64 | response_message = result
65 | yield response_message
66 |
67 |
68 | class StreamingAgent(StreamingAgentMixin, Agent):
69 | """Base streaming agent class"""
70 |
71 | def forward(self, *message: AgentMessage, session_id=0, **kwargs):
72 | formatted_messages = self.aggregator.aggregate(
73 | self.memory.get(session_id),
74 | self.name,
75 | self.output_format,
76 | self.template,
77 | )
78 | for model_state, response, _ in self.llm.stream_chat(
79 | formatted_messages, session_id=session_id, **kwargs
80 | ):
81 | yield AgentMessage(
82 | sender=self.name,
83 | content=response,
84 | formatted=self.output_format.parse_response(response),
85 | stream_state=model_state,
86 | ) if self.output_format else (model_state, response)
87 |
88 |
89 | class AsyncStreamingAgent(AsyncStreamingAgentMixin, AsyncAgent):
90 | """Base asynchronous streaming agent class"""
91 |
92 | async def forward(self, *message: AgentMessage, session_id=0, **kwargs):
93 | formatted_messages = self.aggregator.aggregate(
94 | self.memory.get(session_id),
95 | self.name,
96 | self.output_format,
97 | self.template,
98 | )
99 | async for model_state, response, _ in self.llm.stream_chat(
100 | formatted_messages, session_id=session_id, **kwargs
101 | ):
102 | yield AgentMessage(
103 | sender=self.name,
104 | content=response,
105 | formatted=self.output_format.parse_response(response),
106 | stream_state=model_state,
107 | ) if self.output_format else (model_state, response)
108 |
109 |
110 | class StreamingAgentForInternLM(StreamingAgentMixin, AgentForInternLM):
111 | """Streaming implementation of `lagent.agents.AgentForInternLM`"""
112 |
113 | _INTERNAL_AGENT_CLS = StreamingAgent
114 |
115 | def forward(self, message: AgentMessage, session_id=0, **kwargs):
116 | if isinstance(message, str):
117 | message = AgentMessage(sender="user", content=message)
118 | for _ in range(self.max_turn):
119 | last_agent_state = AgentStatusCode.SESSION_READY
120 | for message in self.agent(message, session_id=session_id, **kwargs):
121 | if isinstance(message.formatted, dict) and message.formatted.get("tool_type"):
122 | if message.stream_state == ModelStatusCode.END:
123 | message.stream_state = last_agent_state + int(
124 | last_agent_state
125 | in [
126 | AgentStatusCode.CODING,
127 | AgentStatusCode.PLUGIN_START,
128 | ]
129 | )
130 | else:
131 | message.stream_state = (
132 | AgentStatusCode.PLUGIN_START
133 | if message.formatted["tool_type"] == "plugin"
134 | else AgentStatusCode.CODING
135 | )
136 | else:
137 | message.stream_state = AgentStatusCode.STREAM_ING
138 | yield message
139 | last_agent_state = message.stream_state
140 | if self.finish_condition(message):
141 | message.stream_state = AgentStatusCode.END
142 | yield message
143 | return
144 | if message.formatted["tool_type"]:
145 | tool_type = message.formatted["tool_type"]
146 | executor = getattr(self, f"{tool_type}_executor", None)
147 | if not executor:
148 | raise RuntimeError(f"No available {tool_type} executor")
149 | tool_return = executor(message, session_id=session_id)
150 | tool_return.stream_state = message.stream_state + 1
151 | message = tool_return
152 | yield message
153 | else:
154 | message.stream_state = AgentStatusCode.STREAM_ING
155 | yield message
156 |
157 |
158 | class AsyncStreamingAgentForInternLM(AsyncStreamingAgentMixin, AsyncAgentForInternLM):
159 | """Streaming implementation of `lagent.agents.AsyncAgentForInternLM`"""
160 |
161 | _INTERNAL_AGENT_CLS = AsyncStreamingAgent
162 |
163 | async def forward(self, message: AgentMessage, session_id=0, **kwargs):
164 | if isinstance(message, str):
165 | message = AgentMessage(sender="user", content=message)
166 | for _ in range(self.max_turn):
167 | last_agent_state = AgentStatusCode.SESSION_READY
168 | async for message in self.agent(message, session_id=session_id, **kwargs):
169 | if isinstance(message.formatted, dict) and message.formatted.get("tool_type"):
170 | if message.stream_state == ModelStatusCode.END:
171 | message.stream_state = last_agent_state + int(
172 | last_agent_state
173 | in [
174 | AgentStatusCode.CODING,
175 | AgentStatusCode.PLUGIN_START,
176 | ]
177 | )
178 | else:
179 | message.stream_state = (
180 | AgentStatusCode.PLUGIN_START
181 | if message.formatted["tool_type"] == "plugin"
182 | else AgentStatusCode.CODING
183 | )
184 | else:
185 | message.stream_state = AgentStatusCode.STREAM_ING
186 | yield message
187 | last_agent_state = message.stream_state
188 | if self.finish_condition(message):
189 | message.stream_state = AgentStatusCode.END
190 | yield message
191 | return
192 | if message.formatted["tool_type"]:
193 | tool_type = message.formatted["tool_type"]
194 | executor = getattr(self, f"{tool_type}_executor", None)
195 | if not executor:
196 | raise RuntimeError(f"No available {tool_type} executor")
197 | tool_return = await executor(message, session_id=session_id)
198 | tool_return.stream_state = message.stream_state + 1
199 | message = tool_return
200 | yield message
201 | else:
202 | message.stream_state = AgentStatusCode.STREAM_ING
203 | yield message
204 |
--------------------------------------------------------------------------------
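Calling one of the streaming agents above yields partial AgentMessage objects whose stream_state tracks the model status. A minimal consumption sketch follows; the GPTAPI model and key are placeholders, and any lagent LLM exposing stream_chat should work the same way.

    # Sketch only: stream partial responses from a StreamingAgent.
    from lagent.llms import GPTAPI
    from lagent.schema import AgentMessage

    from mindsearch.agent.streaming import StreamingAgent

    llm = GPTAPI(model_type="gpt-4-turbo", key="YOUR OPENAI API KEY")  # placeholder credentials
    agent = StreamingAgent(llm=llm, template="You are a helpful assistant.")

    query = AgentMessage(sender="user", content="Hello, who are you?")
    for chunk in agent(query):  # the mixin wraps each model chunk into an AgentMessage
        print(chunk.stream_state, chunk.content)
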
/mindsearch/app.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import json
3 | import logging
4 | import random
5 | from typing import Dict, List, Union
6 |
7 | import janus
8 | from fastapi import FastAPI
9 | from fastapi.middleware.cors import CORSMiddleware
10 | from fastapi.requests import Request
11 | from pydantic import BaseModel, Field
12 | from sse_starlette.sse import EventSourceResponse
13 |
14 | from mindsearch.agent import init_agent
15 |
16 |
17 | def parse_arguments():
18 | import argparse
19 |
20 | parser = argparse.ArgumentParser(description="MindSearch API")
21 | parser.add_argument("--host", default="0.0.0.0", type=str, help="Service host")
22 | parser.add_argument("--port", default=8002, type=int, help="Service port")
23 | parser.add_argument("--lang", default="cn", type=str, help="Language")
24 | parser.add_argument("--model_format", default="internlm_server", type=str, help="Model format")
25 | parser.add_argument("--search_engine", default="BingSearch", type=str, help="Search engine")
26 |     parser.add_argument("--asy", default=False, action="store_true", help="Run the agent asynchronously")
27 | return parser.parse_args()
28 |
29 |
30 | args = parse_arguments()
31 | app = FastAPI(docs_url="/")
32 | app.add_middleware(
33 | CORSMiddleware,
34 | allow_origins=["*"],
35 | allow_credentials=True,
36 | allow_methods=["*"],
37 | allow_headers=["*"],
38 | )
39 |
40 |
41 | class GenerationParams(BaseModel):
42 | inputs: Union[str, List[Dict]]
43 | session_id: int = Field(default_factory=lambda: random.randint(0, 999999))
44 | agent_cfg: Dict = dict()
45 |
46 |
47 | def _postprocess_agent_message(message: dict) -> dict:
48 | content, fmt = message["content"], message["formatted"]
49 | current_node = content["current_node"] if isinstance(content, dict) else None
50 | if current_node:
51 | message["content"] = None
52 | for key in ["ref2url"]:
53 | fmt.pop(key, None)
54 | graph = fmt["node"]
55 | for key in graph.copy():
56 | if key != current_node:
57 | graph.pop(key)
58 | if current_node not in ["root", "response"]:
59 | node = graph[current_node]
60 | for key in ["memory", "session_id"]:
61 | node.pop(key, None)
62 | node_fmt = node["response"]["formatted"]
63 | if isinstance(node_fmt, dict) and "thought" in node_fmt and "action" in node_fmt:
64 | node["response"]["content"] = None
65 | node_fmt["thought"] = (
66 | node_fmt["thought"] and node_fmt["thought"].split("<|action_start|>")[0]
67 | )
68 | if isinstance(node_fmt["action"], str):
69 | node_fmt["action"] = node_fmt["action"].split("<|action_end|>")[0]
70 | else:
71 | if isinstance(fmt, dict) and "thought" in fmt and "action" in fmt:
72 | message["content"] = None
73 | fmt["thought"] = fmt["thought"] and fmt["thought"].split("<|action_start|>")[0]
74 | if isinstance(fmt["action"], str):
75 | fmt["action"] = fmt["action"].split("<|action_end|>")[0]
76 | for key in ["node"]:
77 | fmt.pop(key, None)
78 | return dict(current_node=current_node, response=message)
79 |
80 |
81 | async def run(request: GenerationParams, _request: Request):
82 | async def generate():
83 | try:
84 | queue = janus.Queue()
85 | stop_event = asyncio.Event()
86 |
87 | # Wrapping a sync generator as an async generator using run_in_executor
88 | def sync_generator_wrapper():
89 | try:
90 | for response in agent(inputs, session_id=session_id):
91 | queue.sync_q.put(response)
92 | except Exception as e:
93 | logging.exception(f"Exception in sync_generator_wrapper: {e}")
94 | finally:
95 | # Notify async_generator_wrapper that the data generation is complete.
96 | queue.sync_q.put(None)
97 |
98 | async def async_generator_wrapper():
99 | loop = asyncio.get_event_loop()
100 | loop.run_in_executor(None, sync_generator_wrapper)
101 | while True:
102 | response = await queue.async_q.get()
103 | if response is None: # Ensure that all elements are consumed
104 | break
105 | yield response
106 |                 stop_event.set()  # Signal generate() that streaming has finished so cleanup can run
107 |
108 | async for message in async_generator_wrapper():
109 | response_json = json.dumps(
110 | _postprocess_agent_message(message.model_dump()),
111 | ensure_ascii=False,
112 | )
113 | yield {"data": response_json}
114 | if await _request.is_disconnected():
115 | break
116 | except Exception as exc:
117 | msg = "An error occurred while generating the response."
118 | logging.exception(msg)
119 | response_json = json.dumps(
120 | dict(error=dict(msg=msg, details=str(exc))), ensure_ascii=False
121 | )
122 | yield {"data": response_json}
123 | finally:
124 | await stop_event.wait() # Waiting for async_generator_wrapper to stop
125 | queue.close()
126 | await queue.wait_closed()
127 | agent.agent.memory.memory_map.pop(session_id, None)
128 |
129 | inputs = request.inputs
130 | session_id = request.session_id
131 | agent = init_agent(
132 | lang=args.lang,
133 | model_format=args.model_format,
134 | search_engine=args.search_engine,
135 | )
136 | return EventSourceResponse(generate(), ping=300)
137 |
138 |
139 | async def run_async(request: GenerationParams, _request: Request):
140 | async def generate():
141 | try:
142 | async for message in agent(inputs, session_id=session_id):
143 | response_json = json.dumps(
144 | _postprocess_agent_message(message.model_dump()),
145 | ensure_ascii=False,
146 | )
147 | yield {"data": response_json}
148 | if await _request.is_disconnected():
149 | break
150 | except Exception as exc:
151 | msg = "An error occurred while generating the response."
152 | logging.exception(msg)
153 | response_json = json.dumps(
154 | dict(error=dict(msg=msg, details=str(exc))), ensure_ascii=False
155 | )
156 | yield {"data": response_json}
157 | finally:
158 | agent.agent.memory.memory_map.pop(session_id, None)
159 |
160 | inputs = request.inputs
161 | session_id = request.session_id
162 | agent = init_agent(
163 | lang=args.lang,
164 | model_format=args.model_format,
165 | search_engine=args.search_engine,
166 | use_async=True,
167 | )
168 | return EventSourceResponse(generate(), ping=300)
169 |
170 |
171 | app.add_api_route("/solve", run_async if args.asy else run, methods=["POST"])
172 |
173 | if __name__ == "__main__":
174 | import uvicorn
175 |
176 | uvicorn.run(app, host=args.host, port=args.port, log_level="info")
177 |
--------------------------------------------------------------------------------
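A minimal client sketch for the /solve route above. It assumes the API is already running on 127.0.0.1:8002 (the script's defaults) and reads the SSE stream line by line; requests is used only for illustration and is not pinned in requirements.txt.

    # Sketch only: consume the SSE stream produced by POST /solve.
    import json

    import requests

    url = "http://127.0.0.1:8002/solve"
    payload = {"inputs": "What should I wear in Shanghai today?"}

    with requests.post(url, json=payload, stream=True, timeout=600) as resp:
        for line in resp.iter_lines(decode_unicode=True):
            if not line or not line.startswith("data: "):
                continue  # skip keep-alive pings and SSE comments
            event = json.loads(line[len("data: "):])
            if "error" in event:
                print(event["error"])
                break
            print(event["current_node"], event["response"]["sender"])
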
/mindsearch/terminal.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | from datetime import datetime
4 |
5 | from lagent.actions import WebBrowser
6 | from lagent.agents.stream import get_plugin_prompt
7 | from lagent.llms import INTERNLM2_META, LMDeployServer
8 | from lagent.prompts import InterpreterParser, PluginParser
9 |
10 | from mindsearch.agent.mindsearch_agent import MindSearchAgent
11 | from mindsearch.agent.mindsearch_prompt import (
12 | FINAL_RESPONSE_CN,
13 | FINAL_RESPONSE_EN,
14 | GRAPH_PROMPT_CN,
15 | GRAPH_PROMPT_EN,
16 | searcher_context_template_cn,
17 | searcher_context_template_en,
18 | searcher_input_template_cn,
19 | searcher_input_template_en,
20 | searcher_system_prompt_cn,
21 | searcher_system_prompt_en,
22 | )
23 |
24 | lang = "cn"
25 | date = datetime.now().strftime("The current date is %Y-%m-%d.")
26 | llm = LMDeployServer(
27 | path="internlm/internlm2_5-7b-chat",
28 | model_name="internlm2",
29 | meta_template=INTERNLM2_META,
30 | top_p=0.8,
31 | top_k=1,
32 | temperature=1.0,
33 | max_new_tokens=8192,
34 | repetition_penalty=1.02,
35 | stop_words=["<|im_end|>", "<|action_end|>"],
36 | )
37 | plugins = [WebBrowser(searcher_type="BingSearch", topk=6)]
38 | agent = MindSearchAgent(
39 | llm=llm,
40 | template=date,
41 | output_format=InterpreterParser(template=GRAPH_PROMPT_CN if lang == "cn" else GRAPH_PROMPT_EN),
42 | searcher_cfg=dict(
43 | llm=llm,
44 | plugins=plugins,
45 | template=date,
46 | output_format=PluginParser(
47 | template=searcher_system_prompt_cn if lang == "cn" else searcher_system_prompt_en,
48 | tool_info=get_plugin_prompt(plugins),
49 | ),
50 | user_input_template=searcher_input_template_cn
51 | if lang == "cn"
52 | else searcher_input_template_en,
53 | user_context_template=searcher_context_template_cn
54 | if lang == "cn"
55 | else searcher_context_template_en,
56 | ),
57 | summary_prompt=FINAL_RESPONSE_CN if lang == "cn" else FINAL_RESPONSE_EN,
58 | max_turn=10,
59 | )
60 |
61 | for agent_return in agent("上海今天适合穿什么衣服"):
62 | pass
63 |
64 | print(agent_return.sender)
65 | print(agent_return.content)
66 | print(agent_return.formatted["ref2url"])
67 |
--------------------------------------------------------------------------------
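The loop at the end of terminal.py silently exhausts the generator and only inspects the final message. A small variation, shown as a sketch, surfaces each intermediate stream state so the progress of graph construction and searcher calls is visible.

    # Sketch only: same agent call as above, but print intermediate progress.
    for agent_return in agent("上海今天适合穿什么衣服"):
        print(agent_return.stream_state, agent_return.sender)
    print(agent_return.content)  # final synthesized answer
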
/requirements.txt:
--------------------------------------------------------------------------------
1 | duckduckgo_search==5.3.1b1
2 | einops
3 | fastapi
4 | gradio==5.7.1
5 | janus
6 | lagent==0.5.0rc2
7 | matplotlib
8 | pydantic==2.6.4
9 | python-dotenv
10 | pyvis
11 | schemdraw
12 | sse-starlette
13 | termcolor
14 | transformers==4.41.0
15 | uvicorn
16 | tenacity
17 |
--------------------------------------------------------------------------------