├── .gitignore
├── .gitmodules
├── LICENSE
├── READEME_EN.md
├── README.md
├── img
│   ├── demo.gif
│   └── self.png
├── include
│   ├── AES_Crypto.h
│   ├── ChatBot.h
│   ├── Configure.h
│   ├── Downloader.h
│   ├── ImGuiSpinners.h
│   ├── Impls
│   │   ├── Bots.h
│   │   ├── ChatGPT_Impl.h
│   │   ├── Claude_Impl.h
│   │   ├── CustomRule_Impl.h
│   │   ├── Gemini_Impl.h
│   │   └── LLama_Impl.h
│   ├── Logger.h
│   ├── Progress.hpp
│   ├── Recorder.h
│   ├── Script.h
│   ├── StableDiffusion.h
│   ├── Translate.h
│   ├── VoiceToText.h
│   ├── base64.h
│   ├── imfilebrowser.h
│   ├── imgui_markdown.h
│   ├── pch.h
│   ├── stb_image.h
│   ├── stb_image_resize.h
│   ├── stb_image_write.h
│   └── utils.h
├── sample
│   ├── Application.cpp
│   ├── Application.h
│   ├── CMakeLists.txt
│   ├── Resources
│   │   ├── Chatbot.manifest
│   │   ├── Info.plist
│   │   ├── Info.rc
│   │   ├── Mao
│   │   │   ├── Mao.2048
│   │   │   │   └── texture_00.png
│   │   │   ├── Mao.cdi3.json
│   │   │   ├── Mao.moc3
│   │   │   ├── Mao.model3.json
│   │   │   ├── Mao.physics3.json
│   │   │   ├── Mao.pose3.json
│   │   │   ├── expressions
│   │   │   │   ├── exp_01.exp3.json
│   │   │   │   ├── exp_02.exp3.json
│   │   │   │   ├── exp_03.exp3.json
│   │   │   │   ├── exp_04.exp3.json
│   │   │   │   ├── exp_05.exp3.json
│   │   │   │   ├── exp_06.exp3.json
│   │   │   │   ├── exp_07.exp3.json
│   │   │   │   └── exp_08.exp3.json
│   │   │   └── motions
│   │   │       ├── mtn_01.motion3.json
│   │   │       ├── mtn_02.motion3.json
│   │   │       ├── mtn_03.motion3.json
│   │   │       ├── mtn_04.motion3.json
│   │   │       ├── special_01.motion3.json
│   │   │       └── special_02.motion3.json
│   │   ├── Plugins
│   │   │   └── Test
│   │   │       └── Plugin.lua
│   │   ├── PythonScripts
│   │   │   └── getInstalledPackage.py
│   │   ├── RCa15684
│   │   ├── add.png
│   │   ├── avatar.png
│   │   ├── cancel.png
│   │   ├── copy.png
│   │   ├── default avatar.png
│   │   ├── del.png
│   │   ├── dll_search_paths.props
│   │   ├── edit.png
│   │   ├── eye.png
│   │   ├── font
│   │   │   └── default.otf
│   │   ├── icon.aps
│   │   ├── icon.icns
│   │   ├── icon.ico
│   │   ├── icon.png
│   │   ├── jieba
│   │   │   └── dict.txt
│   │   ├── left.png
│   │   ├── message.png
│   │   ├── pause.png
│   │   ├── play.png
│   │   ├── resource.h
│   │   ├── right.png
│   │   └── send.png
│   ├── SystemRole.h
│   ├── Vendor
│   │   ├── CMakeLists.txt
│   │   └── llava
│   │       ├── CMakeLists.txt
│   │       ├── MobileVLM-README.md
│   │       ├── README-glmedge.md
│   │       ├── README-minicpmo2.6.md
│   │       ├── README-minicpmv2.5.md
│   │       ├── README-minicpmv2.6.md
│   │       ├── README-quantize.md
│   │       ├── README.md
│   │       ├── android
│   │       │   ├── adb_run.sh
│   │       │   └── build_64.sh
│   │       ├── clip-quantize-cli.cpp
│   │       ├── clip.cpp
│   │       ├── clip.h
│   │       ├── convert_image_encoder_to_gguf.py
│   │       ├── glmedge-convert-image-encoder-to-gguf.py
│   │       ├── glmedge-surgery.py
│   │       ├── llava-cli.cpp
│   │       ├── llava.cpp
│   │       ├── llava.h
│   │       ├── llava_surgery.py
│   │       ├── llava_surgery_v2.py
│   │       ├── minicpmv-cli.cpp
│   │       ├── minicpmv-convert-image-encoder-to-gguf.py
│   │       ├── minicpmv-surgery.py
│   │       ├── qwen2_vl_surgery.py
│   │       ├── qwen2vl-cli.cpp
│   │       └── requirements.txt
│   ├── encrypted_systemrole.h
│   ├── encrypted_systemrole_ex.h
│   ├── main.cpp
│   ├── vcpkg-configuration.json
│   └── vcpkg.json
└── src
    ├── AES_Crypto.cpp
    ├── ChatBot.cpp
    ├── Downloader.cpp
    ├── Impls
    │   ├── ChatGPT_Impl.cpp
    │   ├── Claude_Impl.cpp
    │   ├── CustomRule_Impl.cpp
    │   ├── Gemini_Impl.cpp
    │   └── LLama_Impl.cpp
    ├── Logger.cpp
    ├── Recorder.cpp
    ├── Script.cpp
    ├── StableDiffusion.cpp
    ├── Translate.cpp
    ├── VoiceToText.cpp
    ├── base64.cpp
    ├── stb_image.cpp
    ├── stb_image_resize.cpp
    ├── stb_image_wirte.cpp
    └── utils.cpp

-------------------------------------------------------------------------------- /.gitignore: --------------------------------------------------------------------------------

# Files ignored by default
/shelf/
/workspace.xml
# Editor-based HTTP client requests
/httpRequests/
# Datasource local storage ignored files
/dataSources/
/dataSources.local.xml
cmake-build*/

# Ignore IDE configuration files
.idea/
.vscode/
build/

#
*.zip
-------------------------------------------------------------------------------- /.gitmodules: --------------------------------------------------------------------------------

[submodule "sample/Vendor/spdlog"]
    path = sample/Vendor/spdlog
    url = https://github.com/gabime/spdlog.git
[submodule "sample/Vendor/libarchive"]
    path = sample/Vendor/libarchive
    url = https://github.com/libarchive/libarchive
[submodule "sample/Vendor/imgui"]
    path = sample/Vendor/imgui
    url = https://github.com/ocornut/imgui.git
    branch = docking
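spdlog, libarchive, and the docking branch of imgui are vendored as submodules, so a plain clone leaves sample/Vendor incomplete. Standard git commands to pull them in:

```bash
# Clone with all submodules in one step
git clone --recursive https://github.com/NGLSG/ChatBot.git

# Or, inside an existing clone:
git submodule update --init --recursive
```

-------------------------------------------------------------------------------- /READEME_EN.md: --------------------------------------------------------------------------------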

# 🤖 ChatBot - Multi-functional AI Assistant Framework

[English](README_EN.md) | [中文](README.md)

_✨ All-in-one AI interaction solution: Voice wake-up, multimodal dialogue, local execution, cross-platform support ✨_

*(badges: license · stars · forks)*
## ✨ Key Features

| Feature | Description |
| --- | --- |
| 🧠 Local Large Models | Supports running local large models directly without additional software, reducing hardware requirements and ensuring privacy |
| 🔮 Multi-API Support | Supports OpenAI, Claude, iFlytek Spark, Huoshan AI, Tongyi Qianwen, Tencent Hunyuan, Baichuan AI, Gemini, and more |
| 🐳 Ollama Integration | Seamless integration with Ollama, supporting OpenAI-compatible network APIs for flexible model invocation (see the request sketch after this table) |
| 🎨 AI Art Creation | Integrated Stable Diffusion for generating high-quality AI images, unleashing creative potential |
| 🔊 Voice Interaction | Supports voice wake-up and real-time conversation for natural and smooth human-machine interaction |
| 📊 Math Processing | Powerful mathematical computation capabilities for handling complex calculations and scientific analysis |
| 🧩 Extensible Scripts | Customize functionality through Lua scripts for flexible expansion and personalized needs (see the Lua sketch after this table) |
| 👩‍💻 Code Assistant | Supports code project creation and code completion, serving as a developer's powerful tool |
| 💻 Local Execution | Supports local command execution for safer and more efficient task completion |
| 👾 Live2D Models | Supports Live2D model display, creating vivid and engaging visual interaction experiences |
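The 🧩 row above is backed by a sol2-based script host (include/Script.h), and a sample plugin ships at sample/Resources/Plugins/Test/Plugin.lua. The actual entry-point names are not shown in this listing, so everything below is hypothetical — a minimal sketch of what a Lua extension could look like:

```lua
-- Hypothetical plugin sketch: the real entry-point names used by
-- sample/Resources/Plugins/Test/Plugin.lua are not shown in this listing.
local Plugin = {}

-- Called once when the script host loads the plugin.
function Plugin.OnLoad()
    print("Test plugin loaded")
end

-- Example command handler: reverse whatever text the user passed in.
function Plugin.OnMessage(text)
    return string.reverse(text)
end

return Plugin
```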
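For the Ollama / OpenAI-compatible path above, the wire format is the standard chat-completions JSON. A generic sketch using curl — the host, key, and model are placeholders (Ollama's bundled OpenAI-compatible server listens on port 11434 by default):

```bash
# Placeholder host, key, and model — substitute your provider's values.
curl http://localhost:11434/v1/chat/completions \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer $API_KEY" \
  -d '{
    "model": "llama3",
    "messages": [
      {"role": "system", "content": "You are a helpful assistant."},
      {"role": "user", "content": "Hello!"}
    ],
    "temperature": 0.7
  }'
```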
## 📝 System Demo

![System Demo](img/demo.gif)

## 🛠️ Environment Setup

### Dependencies

ChatBot requires the following dependencies:

- nlohmann-json: Modern C++ JSON library
- cpr: Simplified HTTP request library for C++
- PortAudio: Cross-platform audio I/O library
- OpenGL: Graphics rendering library
- imgui: Lightweight GUI library
- glfw3: Window and OpenGL context creation
- yaml-cpp: YAML parsing library
- sol2: Lua C++ API wrapper
- Lua: Lightweight scripting language
- Stb: Single-file library collection
- SDL2/SDL2_image: Multimedia library
- glad: OpenGL loader library
- OpenSSL: Secure communication library

### Installing Dependencies with vcpkg

#### VCPKG Installation
**Windows**

```bash
git clone https://github.com/Microsoft/vcpkg.git
cd vcpkg
./bootstrap-vcpkg.bat
```

**Linux**

```bash
git clone https://github.com/Microsoft/vcpkg.git
cd vcpkg
./bootstrap-vcpkg.sh
```
#### Installing Dependencies

```bash
vcpkg install nlohmann-json cpr portaudio opengl imgui glfw3 yaml-cpp sol2 lua stb sdl2 sdl2-image glad openssl
vcpkg integrate install
```

## 🚀 Compilation Guide

```bash
cd ChatBot
cmake -B build -S . -DCMAKE_TOOLCHAIN_FILE=path/to/vcpkg.cmake
cmake --build build
```

## 💡 Usage Tips

1. On first launch, configure your API keys or local model paths in the configuration file
2. Extend functionality by customizing Lua scripts
3. Place Live2D models in the `models/Live2D/` folder to load them
4. Local large models can be launched directly without additional dependencies
5. Supports third-party API services compatible with OpenAI interfaces

## 🌐 API Support

This project supports the following API types:

- **Public API Services**
  - OpenAI (GPT series)
  - Claude (Anthropic)
  - Google Gemini
  - iFlytek Spark
  - Huoshan AI
  - Tongyi Qianwen
  - Tencent Hunyuan
  - Baichuan AI
  - OpenAI-compatible API services

- **Local Models**
  - Ollama (supports various open-source models)
  - LLama (various local models)

## 🔗 Related Links

- [Project Documentation](https://github.com/NGLSG/ChatBot/wiki)
- [Issue Tracker](https://github.com/NGLSG/ChatBot/issues)
- [Changelog](https://github.com/NGLSG/ChatBot/blob/main/CHANGELOG.md)

## 📊 Development Roadmap

- [ ] Multi-language interface support
- [ ] Mobile adaptation
- [ ] Plugin marketplace
- [ ] More API support

## ⭐ Supporting the Project

If you like this project, please give us a star! Your support motivates us to keep improving.

## 📄 License

This project is licensed under the [GNU General Public License v3.0](LICENSE) (GPL-3.0). This means you are free to use, modify, and distribute the software, but any derivative works must also be released under the same license.

-------------------------------------------------------------------------------- /README.md: --------------------------------------------------------------------------------

# 🤖 ChatBot - 多功能AI助手框架

[English](README_EN.md) | [中文](README.md)

_✨ 一站式AI交互解决方案:语音唤醒、多模态对话、本地执行、跨平台支持 ✨_

*(badges: license · stars · forks)*
## ✨ 功能亮点

| 功能 | 说明 |
| --- | --- |
| 🧠 本地大模型 | 支持直接运行本地大模型,无需安装其他软件,降低硬件要求,保护隐私安全 |
| 🔮 多API支持 | 支持OpenAI、Claude、讯飞星火、火山引擎、通义千问、腾讯混元、百川AI、Gemini等多种API |
| 🐳 Ollama集成 | 无缝集成Ollama,支持泛OpenAI接口的网络API,实现灵活模型调用(配置示例见表后) |
| 🎨 AI艺术创作 | 集成Stable Diffusion,轻松生成高质量AI图像,释放创意潜能 |
| 🔊 语音交互 | 支持语音唤醒与实时对话,自然流畅的人机交互体验 |
| 📊 数学处理 | 强大的数学运算能力,轻松处理复杂计算和科学分析 |
| 🧩 可扩展脚本 | 通过Lua脚本定制功能,灵活扩展,满足个性化需求 |
| 👩‍💻 代码助手 | 支持代码项目创建与代码补全,成为开发者得力助手 |
| 💻 本地执行 | 支持本地命令执行,更安全、更高效地完成任务 |
| 👾 Live2D模型 | 支持Live2D模型展示,创造生动有趣的视觉交互体验(目录结构示例见表后) |
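下方使用提示第3条提到 Live2D 模型需放置于 models/Live2D/ 文件夹;参照仓库自带的 sample/Resources/Mao 模型,一个模型目录大致包含以下 Cubism 运行时文件(MyModel 为占位名称,仅作示意):

```text
models/Live2D/
└── MyModel/
    ├── MyModel.moc3
    ├── MyModel.model3.json
    ├── MyModel.physics3.json
    ├── MyModel.2048/
    │   └── texture_00.png
    ├── expressions/
    │   └── *.exp3.json
    └── motions/
        └── *.motion3.json
```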
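项目依赖 yaml-cpp,且代码中的 GPTLike 接入层暴露 enable、api_key、model、apiHost、apiPath 等字段,推测配置文件为 YAML 形式。以下键名均为假设(真实配置结构未包含在本清单中),仅演示将泛OpenAI接口指向本地 Ollama 的思路:

```yaml
# 假设的键名示例 —— 真实配置结构未在本清单中给出
customrule:
  enable: true
  api_key: "local-ollama-no-key-needed"   # 本地 Ollama 通常无需真实密钥
  model: "llama3"
  apiHost: "http://localhost:11434"
  apiPath: "/v1/chat/completions"
```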
## 📝 系统演示

![系统演示](img/demo.gif)

## 🛠️ 环境配置

### 依赖项

ChatBot需要以下依赖项:

- nlohmann-json:现代C++的JSON处理库
- cpr:简化HTTP请求的C++库
- PortAudio:跨平台音频I/O库
- OpenGL:图形渲染库
- imgui:轻量级GUI库
- glfw3:创建窗口与OpenGL上下文
- yaml-cpp:YAML解析库
- sol2:Lua C++ API封装库
- Lua:轻量级脚本语言
- Stb:单文件库集合
- SDL2/SDL2_image:多媒体库
- glad:OpenGL加载库
- OpenSSL:安全通信库

### 使用vcpkg安装依赖

#### VCPKG 安装
**Windows**

```bash
git clone https://github.com/Microsoft/vcpkg.git
cd vcpkg
./bootstrap-vcpkg.bat
```

**Linux**

```bash
git clone https://github.com/Microsoft/vcpkg.git
cd vcpkg
./bootstrap-vcpkg.sh
```
#### 安装依赖项

```bash
vcpkg install nlohmann-json cpr portaudio opengl imgui glfw3 yaml-cpp sol2 lua stb sdl2 sdl2-image glad openssl
vcpkg integrate install
```

## 🚀 编译指南

```bash
cd ChatBot
cmake -B build -S . -DCMAKE_TOOLCHAIN_FILE=path/to/vcpkg.cmake
cmake --build build
```

## 💡 使用提示

1. 首次启动时,需要在配置文件中设置您的API密钥或者本地模型路径
2. 可通过自定义Lua脚本扩展功能
3. Live2D模型放置于 `models/Live2D/` 文件夹中即可加载
4. 本地大模型可直接启动,无需额外安装依赖
5. 支持兼容OpenAI接口的第三方API服务

## 🌐 API支持

本项目支持以下API类型:

- **公共API服务**
  - OpenAI (GPT系列)
  - Claude (Anthropic)
  - Google Gemini
  - 讯飞星火
  - 火山引擎
  - 通义千问
  - 腾讯混元
  - 百川AI
  - 泛OpenAI接口的API服务

- **本地模型**
  - Ollama (支持各种开源模型)
  - LLama (各种本地模型)

## 🔗 相关链接

- [项目文档](https://github.com/NGLSG/ChatBot/wiki)
- [问题反馈](https://github.com/NGLSG/ChatBot/issues)
- [更新日志](https://github.com/NGLSG/ChatBot/blob/main/CHANGELOG.md)

## 📊 开发路线图

- [ ] 多语言界面支持
- [ ] 移动端适配
- [ ] 插件市场
- [ ] 更多API支持

## ⭐ 支持项目

如果您喜欢这个项目,请给我们点个星!您的支持是我们不断改进的动力。

## 📄 许可证

本项目采用 [GNU通用公共许可证v3.0](LICENSE) (GPL-3.0)。这意味着您可以自由地使用、修改和分发本软件,但任何基于本软件的衍生作品也必须以相同的许可证发布。

-------------------------------------------------------------------------------- /img/demo.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NGLSG/ChatBot/a3e43f702910a309154d134637e1cf024ccd02dc/img/demo.gif -------------------------------------------------------------------------------- /img/self.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NGLSG/ChatBot/a3e43f702910a309154d134637e1cf024ccd02dc/img/self.png -------------------------------------------------------------------------------- /include/AES_Crypto.h: --------------------------------------------------------------------------------

#ifndef SECURE_AES_H
#define SECURE_AES_H

// The bracketed include names and vector element types were lost in extraction;
// <string> and <vector> are required by the declarations below, the remaining
// headers and the unsigned char element type are assumptions.
#include <string>
#include <vector>
#include <cstdint>
#include <openssl/evp.h>
#include <openssl/rand.h>

class SecureAES {
public:
    static void X1();
    static void X2();
    static std::vector<unsigned char> X3(const std::string& X4);
    static std::string X5(const std::vector<unsigned char>& X6);
    static std::string X7(const std::vector<unsigned char>& X8);
    static std::vector<unsigned char> X9(const std::string& X10);

private:
    static std::vector<unsigned char> X11();
    static std::vector<unsigned char> X12(const std::vector<unsigned char>& X13, std::vector<unsigned char>& X14);
    static std::vector<unsigned char> X15(const std::vector<unsigned char>& X16, const std::vector<unsigned char>& X17);
};

#endif

-------------------------------------------------------------------------------- /include/ChatBot.h: --------------------------------------------------------------------------------

#ifndef CHATBOT_H
#define CHATBOT_H

// Extraction dropped the bracketed include names in this header; the standard
// headers below are reconstructed from usage in this file and are assumptions.
#include "utils.h"
#include <string>
#include <map>
#include <unordered_map>
#include <mutex>
#include <atomic>
#include <thread>
#include "Configure.h"
#include "Logger.h"
#include <nlohmann/json.hpp>
#include <curl/curl.h>
using json = nlohmann::json;
using namespace std;

namespace Role
{
    static std::string User = "user";
    static std::string System = "system";
    static std::string Assistant = "assistant";
};

struct Billing
{
    float total = -1;
    float available = -1;
    float
used = -1; 30 | long long date = -1; 31 | }; 32 | 33 | struct DString 34 | { 35 | std::string* str1; 36 | std::string* str2; 37 | std::string* response; 38 | 39 | void* instance; 40 | std::mutex mtx; 41 | }; 42 | 43 | class ChatBot 44 | { 45 | public: 46 | friend class StringExecutor; 47 | virtual std::string Submit(std::string prompt, size_t timeStamp, std::string role = Role::User, 48 | std::string convid = "default", float temp = 0.7f, 49 | float top_p = 0.9f, 50 | uint32_t top_k = 40u, 51 | float pres_pen = 0.0f, 52 | float freq_pen = 0.0f, bool async = false) = 0; 53 | 54 | virtual void BuildHistory(const std::vector>& history) =0; 55 | virtual std::string GetModel() =0; 56 | 57 | void SubmitAsync( 58 | std::string prompt, 59 | size_t timeStamp, 60 | std::string role = Role::User, 61 | std::string convid = "default", 62 | float temp = 0.7f, 63 | float top_p = 0.9f, 64 | uint32_t top_k = 40u, 65 | float pres_pen = 0.0f, 66 | float freq_pen = 0.0f) 67 | { 68 | { 69 | std::lock_guard lock(forceStopMutex); 70 | forceStop = false; 71 | } 72 | lastFinalResponse = ""; 73 | std::get<1>(Response[timeStamp]) = false; 74 | std::thread([=] 75 | { 76 | Submit(prompt, timeStamp, role, convid, temp, top_p, top_k, pres_pen, freq_pen, true); 77 | }).detach(); 78 | } 79 | 80 | 81 | std::string GetLastRawResponse() 82 | { 83 | std::string response = lastRawResponse; 84 | lastRawResponse = ""; 85 | return response; 86 | } 87 | 88 | void ForceStop() 89 | { 90 | std::lock_guard lock(forceStopMutex); 91 | forceStop = true; 92 | } 93 | 94 | virtual void Reset() = 0; 95 | 96 | virtual void Load(std::string name = "default") = 0; 97 | 98 | virtual void Save(std::string name = "default") = 0; 99 | 100 | virtual void Del(std::string name) = 0; 101 | 102 | virtual void Add(std::string name) = 0; 103 | 104 | 105 | std::string GetResponse(size_t uid) 106 | { 107 | std::string response; 108 | response = std::get<0>(Response[uid]); 109 | lastFinalResponse += response; 110 | std::get<0>(Response[uid]) = ""; 111 | return response; 112 | } 113 | 114 | bool Finished(size_t uid) 115 | { 116 | return std::get<1>(Response[uid]); 117 | } 118 | 119 | virtual map GetHistory() = 0; 120 | 121 | 122 | map History; 123 | std::atomic forceStop{false}; 124 | virtual std::string sendRequest(std::string data, size_t ts) =0; 125 | 126 | protected: 127 | long long lastTimeStamp = 0; 128 | std::mutex fileAccessMutex; 129 | std::mutex historyAccessMutex; 130 | std::string lastFinalResponse; 131 | std::string lastRawResponse; 132 | unordered_map> Response; //ts,response,finished 133 | 134 | std::mutex forceStopMutex; 135 | }; 136 | 137 | inline static int ProgressCallback(void* clientp, curl_off_t dltotal, curl_off_t dlnow, curl_off_t ultotal, 138 | curl_off_t ulnow) 139 | { 140 | ChatBot* chatBot = static_cast(clientp); 141 | if (chatBot && chatBot->forceStop.load()) 142 | { 143 | return 1; 144 | } 145 | return 0; 146 | } 147 | 148 | #endif 149 | -------------------------------------------------------------------------------- /include/Downloader.h: -------------------------------------------------------------------------------- 1 | // 2 | // Created by 92703 on 2025/2/8. 3 | // 4 | 5 | #ifndef DOWNLOADER_H 6 | #define DOWNLOADER_H 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | 18 | template 19 | static inline std::string FormatString(const std::string& fmt, Args... 
args) 20 | { 21 | std::string result = fmt; 22 | std::stringstream ss; 23 | int index = 0; 24 | 25 | // 使用折叠表达式替换占位符 26 | ([&](const auto& arg) 27 | { 28 | std::string placeholder = "{" + std::to_string(index++) + "}"; 29 | size_t pos = result.find(placeholder); 30 | if (pos != std::string::npos) 31 | { 32 | ss.str(""); // 清空 stringstream 33 | ss << arg; // 将参数写入 stringstream 34 | result.replace(pos, placeholder.length(), ss.str()); 35 | } 36 | }(args), ...); // 折叠表达式展开参数包 37 | 38 | return result; 39 | } 40 | 41 | static bool verifyHash(const std::string& path, const std::string& hash); 42 | 43 | 44 | enum UrlStatus 45 | { 46 | URunning = 1 << 0, // 00001 47 | UPaused = 1 << 1, // 00010 48 | UFinished = 1 << 2, // 00100 49 | UStopped = 1 << 3, // 01000 50 | USeeding = 1 << 4, // 10000 51 | UError = 1 << 5, // 100000 52 | UQuit = 1 << 6, // 1000000 53 | UInitializing = 1 << 7, // 10000000 54 | NO_STATUS = 0, 55 | }; 56 | 57 | static UrlStatus operator|(UrlStatus a, UrlStatus b); 58 | 59 | static UrlStatus operator&(UrlStatus a, UrlStatus b); 60 | 61 | static UrlStatus operator~(UrlStatus a); 62 | 63 | static UrlStatus operator|=(UrlStatus& a, UrlStatus b); 64 | 65 | struct Speed 66 | { 67 | void SetSpeed(float content); 68 | 69 | float content = 0; 70 | std::string unit; // B/s, KB/s, MB/s, GB/s 71 | }; 72 | 73 | struct WebSpeed 74 | { 75 | Speed downloadSpeed; 76 | Speed uploadSpeed; 77 | }; 78 | 79 | struct BasicInfo 80 | { 81 | std::string savePath; 82 | std::string url; 83 | size_t totalSize; 84 | std::string contentType; 85 | bool supportRange; 86 | size_t totalDownloaded; 87 | std::vector files; 88 | bool isTorrent; 89 | }; 90 | 91 | class Downloader 92 | { 93 | public: 94 | Downloader(const std::string& url, uint8_t numThreads = 8, const std::string& savePath = ""); 95 | 96 | WebSpeed GetSpeed() const; 97 | 98 | ~Downloader(); 99 | 100 | void ForceStart(); 101 | 102 | void AddCallback(std::function callback = nullptr) 103 | { 104 | finishCallback = callback; 105 | } 106 | 107 | void Start(); 108 | 109 | void Pause(); 110 | 111 | BasicInfo GetBasicInfo(); 112 | void Resume(); 113 | 114 | UrlStatus GetStatus() const; 115 | 116 | void Stop(); 117 | 118 | bool IsRunning() const; 119 | 120 | bool IsPaused() const; 121 | 122 | bool IsFinished() const; 123 | 124 | float GetProgress() const; 125 | 126 | private: 127 | struct Chunck 128 | { 129 | size_t start; 130 | size_t end; 131 | size_t size; 132 | std::vector data; 133 | }; 134 | 135 | void GetDownloadBasicInfo(); 136 | 137 | void SpeedMonitor(); 138 | 139 | void Download(); 140 | 141 | void DownloadSingle(int retries = 0); 142 | 143 | bool DownloadMulti(); 144 | 145 | void DownloadChunk(Chunck& chunk, int retries = 0); 146 | 147 | static size_t WriteCallback(void* buffer, size_t size, size_t nitems, std::tuple* parms); 148 | 149 | static size_t WriteData(void* ptr, size_t size, size_t nmemb, std::ofstream* parm); 150 | 151 | static size_t HeaderCallback(char* buffer, size_t size, size_t nitems, void* userdata); 152 | 153 | static int ProgressCallback(void* clientp, curl_off_t dltotal, curl_off_t dlnow, curl_off_t ultotal, 154 | curl_off_t ulnow); 155 | 156 | std::atomic curl; 157 | std::vector multiCurl; 158 | std::function finishCallback; 159 | BasicInfo basicInfo; 160 | std::thread downloadThread; 161 | uint8_t numThreads; 162 | WebSpeed speed; 163 | std::chrono::system_clock::time_point startTime; 164 | std::chrono::system_clock::time_point endTime; 165 | // 控制下载状态 166 | std::atomic status = UrlStatus::UInitializing; 167 | std::mutex 
pauseMutex; 168 | std::condition_variable pauseCondition; 169 | 170 | size_t previousDlnow = 0; 171 | }; 172 | 173 | 174 | #endif //DOWNLOADER_H 175 | -------------------------------------------------------------------------------- /include/Impls/Bots.h: -------------------------------------------------------------------------------- 1 | #ifndef BOTS_H 2 | #define BOTS_H 3 | #include "ChatGPT_Impl.h" 4 | #include "Claude_Impl.h" 5 | #include "Gemini_Impl.h" 6 | #include "LLama_Impl.h" 7 | #include "CustomRule_Impl.h" 8 | #endif //BOTS_H 9 | -------------------------------------------------------------------------------- /include/Impls/ChatGPT_Impl.h: -------------------------------------------------------------------------------- 1 | #ifndef CHATGPT_H 2 | #define CHATGPT_H 3 | 4 | #include "ChatBot.h" 5 | 6 | class ChatGPT : public ChatBot 7 | { 8 | public: 9 | ChatGPT(std::string systemrole); 10 | 11 | ChatGPT(const OpenAIBotCreateInfo& chat_data, std::string systemrole = ""); 12 | 13 | std::string Submit(std::string prompt, size_t timeStamp, std::string role, 14 | std::string convid, float temp, 15 | float top_p, 16 | uint32_t top_k, 17 | float pres_pen, 18 | float freq_pen, bool async) override; 19 | 20 | void Reset() override; 21 | 22 | void Load(std::string name) override; 23 | 24 | void Save(std::string name) override; 25 | 26 | void Del(std::string name) override; 27 | 28 | void Add(std::string name) override; 29 | 30 | map GetHistory() override; 31 | 32 | static std::string Stamp2Time(long long timestamp); 33 | 34 | json history; 35 | std::string sendRequest(std::string data, size_t ts) override; 36 | void BuildHistory(const std::vector>& history) override; 37 | 38 | std::string GetModel() override 39 | { 40 | return chat_data_.model; 41 | } 42 | 43 | protected: 44 | OpenAIBotCreateInfo chat_data_; 45 | std::string mode_name_ = "default"; 46 | std::string convid_ = "default"; 47 | std::map Conversation; 48 | std::mutex print_mutex; 49 | const std::string ConversationPath = "Conversations/"; 50 | const std::string sys = "You are ChatGPT, a large language model trained by OpenAI. 
Respond conversationally."; 51 | const std::string suffix = ".dat"; 52 | 53 | json defaultJson; 54 | }; 55 | 56 | class GPTLike : public ChatGPT 57 | { 58 | public: 59 | GPTLike(const GPTLikeCreateInfo& data, std::string systemrole = ""): ChatGPT(systemrole) 60 | { 61 | chat_data_.enable = data.enable; 62 | chat_data_.api_key = data.api_key; 63 | chat_data_.model = data.model; 64 | chat_data_.useWebProxy = true; 65 | chat_data_._endPoint = data.apiHost + data.apiPath; 66 | } 67 | }; 68 | 69 | class Grok : public ChatGPT 70 | { 71 | public: 72 | Grok(const GPTLikeCreateInfo& data, std::string systemrole = ""): ChatGPT(systemrole) 73 | { 74 | chat_data_.enable = data.enable; 75 | chat_data_.api_key = data.api_key; 76 | chat_data_.model = data.model; 77 | chat_data_.useWebProxy = true; 78 | chat_data_._endPoint = "https://api.x.ai/v1/chat/completions"; 79 | } 80 | }; 81 | 82 | class Mistral : public ChatGPT 83 | { 84 | public: 85 | // 构造函数,接收 GPTLikeCreateInfo 并配置为 Mistral API 86 | Mistral(const GPTLikeCreateInfo& data, std::string systemrole = ""): ChatGPT(systemrole) 87 | { 88 | // 设置基本参数 89 | chat_data_.enable = data.enable; 90 | chat_data_.api_key = data.api_key; 91 | chat_data_.model = data.model; 92 | chat_data_.useWebProxy = true; 93 | 94 | // 设置 Mistral API 端点 95 | chat_data_._endPoint = "https://api.mistral.ai/v1/chat/completions"; 96 | } 97 | }; 98 | 99 | class TongyiQianwen : public ChatGPT 100 | { 101 | public: 102 | TongyiQianwen(const GPTLikeCreateInfo& data, std::string systemrole = ""): ChatGPT(systemrole) 103 | { 104 | // 设置基本配置参数 105 | chat_data_.enable = data.enable; 106 | chat_data_.api_key = data.api_key; 107 | chat_data_.model = data.model; 108 | chat_data_.useWebProxy = true; 109 | 110 | // 设置通义千问API端点 111 | chat_data_._endPoint = "https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions"; 112 | 113 | // 如果用户提供了自定义端点,则使用自定义端点 114 | if (!data.apiHost.empty()) 115 | { 116 | chat_data_._endPoint = data.apiHost; 117 | } 118 | } 119 | }; 120 | 121 | class SparkDesk : public ChatGPT 122 | { 123 | public: 124 | SparkDesk(const GPTLikeCreateInfo& data, std::string systemrole = ""): ChatGPT(systemrole) 125 | { 126 | // 设置基本配置参数 127 | chat_data_.enable = data.enable; 128 | chat_data_.api_key = data.api_key; 129 | chat_data_.model = data.model; 130 | chat_data_.useWebProxy = true; 131 | 132 | // 设置星火API端点 133 | chat_data_._endPoint = "https://spark-api-open.xf-yun.com/v1/chat/completions"; 134 | 135 | // 如果用户提供了自定义端点,则使用自定义端点 136 | if (!data.apiHost.empty()) 137 | { 138 | chat_data_._endPoint = data.apiHost; 139 | } 140 | } 141 | }; 142 | 143 | class BaichuanAI : public ChatGPT 144 | { 145 | public: 146 | /** 147 | * 构造函数 148 | * @param data 百川API配置信息 149 | * @param systemrole 系统角色提示信息 150 | */ 151 | BaichuanAI(const GPTLikeCreateInfo& data, std::string systemrole = ""): ChatGPT(systemrole) 152 | { 153 | // 设置基本配置参数 154 | chat_data_.enable = data.enable; 155 | chat_data_.api_key = data.api_key; 156 | chat_data_.model = data.model; 157 | chat_data_.useWebProxy = true; 158 | 159 | // 设置百川API端点 160 | chat_data_._endPoint = "https://api.baichuan-ai.com/v1/chat/completions"; 161 | 162 | // 如果用户提供了自定义端点,则使用自定义端点 163 | if (!data.apiHost.empty()) 164 | { 165 | chat_data_._endPoint = data.apiHost; 166 | } 167 | } 168 | }; 169 | 170 | class HunyuanAI : public ChatGPT 171 | { 172 | public: 173 | /** 174 | * 构造函数 175 | * @param data 混元API配置信息 176 | * @param systemrole 系统角色提示信息 177 | */ 178 | HunyuanAI(const GPTLikeCreateInfo& data, std::string systemrole = ""): ChatGPT(systemrole) 
179 | { 180 | // 设置基本配置参数 181 | chat_data_.enable = data.enable; 182 | chat_data_.api_key = data.api_key; 183 | chat_data_.model = data.model; 184 | chat_data_.useWebProxy = true; 185 | 186 | // 设置腾讯混元API端点 187 | chat_data_._endPoint = "https://api.hunyuan.cloud.tencent.com/v1/chat/completions"; 188 | 189 | // 如果用户提供了自定义端点,则使用自定义端点 190 | if (!data.apiHost.empty()) 191 | { 192 | chat_data_._endPoint = data.apiHost; 193 | } 194 | } 195 | }; 196 | 197 | class HuoshanAI : public ChatGPT 198 | { 199 | public: 200 | /** 201 | * 构造函数 202 | * @param data 火山API配置信息 203 | * @param systemrole 系统角色提示信息 204 | */ 205 | HuoshanAI(const GPTLikeCreateInfo& data, std::string systemrole = ""): ChatGPT(systemrole) 206 | { 207 | // 设置基本配置参数 208 | chat_data_.enable = data.enable; 209 | chat_data_.api_key = data.api_key; 210 | chat_data_.model = data.model; 211 | chat_data_.useWebProxy = true; 212 | 213 | // 设置火山API端点 214 | chat_data_._endPoint = "https://ark.cn-beijing.volces.com/api/v3/chat/completions"; 215 | 216 | // 如果用户提供了自定义端点,则使用自定义端点 217 | if (!data.apiHost.empty()) 218 | { 219 | chat_data_._endPoint = data.apiHost; 220 | } 221 | } 222 | }; 223 | 224 | class ChatGLM : public ChatGPT 225 | { 226 | public: 227 | /** 228 | * 构造函数 229 | * @param data ChatGLM API配置信息 230 | * @param systemrole 系统角色提示信息 231 | */ 232 | ChatGLM(const GPTLikeCreateInfo& data, std::string systemrole = ""): ChatGPT(systemrole) 233 | { 234 | // 设置基本配置参数 235 | chat_data_.enable = data.enable; 236 | chat_data_.api_key = data.api_key; 237 | chat_data_.model = data.model; 238 | chat_data_.useWebProxy = true; 239 | 240 | // 设置ChatGLM API端点 241 | chat_data_._endPoint = "https://open.bigmodel.cn/api/paas/v4/chat/completions"; 242 | 243 | // 如果用户提供了自定义端点,则使用自定义端点 244 | if (!data.apiHost.empty()) 245 | { 246 | chat_data_._endPoint = data.apiHost; 247 | } 248 | } 249 | }; 250 | 251 | 252 | #endif //CHATGPT_H 253 | -------------------------------------------------------------------------------- /include/Impls/Claude_Impl.h: -------------------------------------------------------------------------------- 1 | #ifndef CLAUDE_IMPL_H 2 | #define CLAUDE_IMPL_H 3 | 4 | #include "ChatBot.h" 5 | 6 | class ClaudeInSlack : public ChatBot 7 | { 8 | public: 9 | ClaudeInSlack(const ClaudeBotCreateInfo& data) : claudeData(data) 10 | { 11 | } 12 | 13 | std::string Submit(std::string prompt, size_t timeStamp, std::string role = Role::User, 14 | std::string convid = "default", float temp = 0.7f, 15 | float top_p = 0.9f, 16 | uint32_t top_k = 40u, 17 | float pres_pen = 0.0f, 18 | float freq_pen = 0.0f, bool async = false) override; 19 | 20 | void Reset() override;; 21 | 22 | void Load(std::string name) override; 23 | 24 | void Save(std::string name) override; 25 | 26 | void Del(std::string id) override; 27 | 28 | void Add(std::string name) override; 29 | 30 | map GetHistory() override; 31 | 32 | std::string sendRequest(std::string data, size_t ts) override 33 | { 34 | return ""; 35 | } 36 | 37 | std::string GetModel() override 38 | { 39 | return "Claude"; 40 | } 41 | 42 | void BuildHistory(const std::vector>& history) override; 43 | 44 | private: 45 | map ChannelListName; 46 | map ChannelListID; 47 | ClaudeBotCreateInfo claudeData; 48 | }; 49 | 50 | class Claude : public ChatBot 51 | { 52 | public: 53 | // 构造函数 - 带系统角色提示 54 | Claude(std::string systemrole); 55 | 56 | // 构造函数 - 带 Claude API 配置信息 57 | Claude(const ClaudeAPICreateInfo& claude_data, std::string systemrole = ""); 58 | 59 | // 提交用户消息并获取响应 60 | std::string Submit(std::string prompt, size_t timeStamp, 
std::string role = Role::User, 61 | std::string convid = "default", float temp = 0.7f, 62 | float top_p = 0.9f, 63 | uint32_t top_k = 40u, 64 | float pres_pen = 0.0f, 65 | float freq_pen = 0.0f, bool async = false) override; 66 | 67 | // 重置当前对话 68 | void Reset() override; 69 | 70 | // 加载指定对话 71 | void Load(std::string name) override; 72 | 73 | // 保存当前对话 74 | void Save(std::string name) override; 75 | 76 | // 删除指定对话 77 | void Del(std::string name) override; 78 | 79 | // 添加新对话 80 | void Add(std::string name) override; 81 | 82 | // 获取历史记录 83 | map GetHistory() override; 84 | 85 | // 时间戳转换为可读时间 86 | static std::string Stamp2Time(long long timestamp); 87 | // 发送请求到 Claude API 88 | std::string sendRequest(std::string data, size_t ts) override; 89 | // 历史记录 90 | json history; 91 | 92 | protected: 93 | ClaudeAPICreateInfo claude_data_; 94 | std::string mode_name_ = "default"; 95 | std::string convid_ = "default"; 96 | std::map Conversation; 97 | std::mutex print_mutex; 98 | const std::string ConversationPath = "Conversations/Claude/"; 99 | const std::string sys = "You are Claude, an AI assistant developed by Anthropic. Please respond in Chinese."; 100 | const std::string suffix = ".dat"; 101 | json LastHistory; 102 | json defaultJson; 103 | 104 | // 检查是否已保存 105 | bool IsSaved(); 106 | 107 | // 获取当前时间戳 108 | static long long getCurrentTimestamp(); 109 | 110 | // 获取指定天数前的时间戳 111 | static long long getTimestampBefore(int daysBefore); 112 | 113 | public: 114 | void BuildHistory(const std::vector>& history) override; 115 | 116 | std::string GetModel() override 117 | { 118 | return claude_data_.model; 119 | } 120 | }; 121 | 122 | 123 | #endif //CLAUDE_IMPL_H 124 | -------------------------------------------------------------------------------- /include/Impls/CustomRule_Impl.h: -------------------------------------------------------------------------------- 1 | #ifndef CUSTOMROLE_IMPL_H 2 | #define CUSTOMROLE_IMPL_H 3 | #include "ChatBot.h" 4 | 5 | class JsonPathBuilder 6 | { 7 | private: 8 | json rootJson; 9 | 10 | // 向指定路径添加值 11 | void addValueAtPath(json& jsonObj, const vector& path, const string& value); 12 | 13 | public: 14 | // 添加路径和值 15 | void addPath(const string& pathStr, const string& value); 16 | 17 | // 获取构建的JSON 18 | json getJson(); 19 | }; 20 | 21 | class CustomRule_Impl : public ChatBot 22 | { 23 | private: 24 | CustomRule CustomRuleData; 25 | const std::string ConversationPath = "Conversations/CustomRule/"; 26 | json SystemPrompt; 27 | const std::string suffix = ".dat"; 28 | std::string convid_ = "default"; 29 | std::map Conversation; 30 | json history; 31 | json templateJson; 32 | vector paths; 33 | vector paths2; 34 | std::string sendRequest(std::string data, size_t ts) override; 35 | 36 | json buildRequest(const std::string& prompt, const std::string& role); 37 | 38 | public: 39 | CustomRule_Impl(const CustomRule& data, 40 | std::string systemrole = "You are a ai assistant made by Artiverse Studio."); 41 | std::string Submit(std::string prompt, size_t timeStamp, std::string role, 42 | std::string convid, float temp, 43 | float top_p, 44 | uint32_t top_k, 45 | float pres_pen, 46 | float freq_pen, bool async) override; 47 | void Reset() override; 48 | void Load(std::string name) override; 49 | void Save(std::string name) override; 50 | void Del(std::string name) override; 51 | void Add(std::string name) override; 52 | map GetHistory() override; 53 | void BuildHistory(const std::vector>& history) override; 54 | 55 | 56 | std::string GetModel() override 57 | { 58 | return 
CustomRuleData.model; 59 | } 60 | }; 61 | 62 | 63 | #endif //CUSTOMROLE_IMPL_H 64 | -------------------------------------------------------------------------------- /include/Impls/Gemini_Impl.h: -------------------------------------------------------------------------------- 1 | #ifndef GEMINI_IMPL_H 2 | #define GEMINI_IMPL_H 3 | 4 | #include "ChatBot.h" 5 | 6 | class Gemini : public ChatBot 7 | { 8 | public: 9 | Gemini(const GeminiBotCreateInfo& data, const std::string sys) : geminiData(data) 10 | { 11 | json _t; 12 | 13 | _t["role"] = "user"; 14 | _t["parts"] = json::array(); 15 | _t["parts"].push_back(json::object()); 16 | _t["parts"][0]["text"] = sys; 17 | 18 | json _t2; 19 | _t2["role"] = "model"; 20 | _t2["parts"] = json::array(); 21 | _t2["parts"].push_back(json::object()); 22 | _t2["parts"][0]["text"] = "Yes I am here to help you."; 23 | SystemPrompt.push_back(_t); 24 | SystemPrompt.push_back(_t2); 25 | } 26 | 27 | std::string sendRequest(std::string data, size_t ts) override; 28 | std::string Submit(std::string prompt, size_t timeStamp, std::string role, 29 | std::string convid, float temp, 30 | float top_p, 31 | uint32_t top_k, 32 | float pres_pen, 33 | float freq_pen, bool async) override; 34 | 35 | void Reset() override; 36 | 37 | void Load(std::string name) override; 38 | 39 | void Save(std::string name) override; 40 | 41 | void Del(std::string name) override; 42 | 43 | void Add(std::string name) override; 44 | 45 | map GetHistory() override { return map(); } 46 | void BuildHistory(const std::vector>& history) override; 47 | 48 | std::string GetModel() override 49 | { 50 | return geminiData.model; 51 | } 52 | 53 | private: 54 | GeminiBotCreateInfo geminiData; 55 | const std::string ConversationPath = "Conversations/Gemini/"; 56 | json SystemPrompt; 57 | const std::string suffix = ".dat"; 58 | std::string convid_ = "default"; 59 | std::map Conversation; 60 | json history; 61 | }; 62 | 63 | #endif //GEMINI_IMPL_H 64 | -------------------------------------------------------------------------------- /include/Impls/LLama_Impl.h: -------------------------------------------------------------------------------- 1 | #ifndef LLAMA_IMPL_H 2 | #define LLAMA_IMPL_H 3 | 4 | 5 | #include "ChatBot.h" 6 | 7 | class LLama : public ChatBot 8 | { 9 | public: 10 | LLama(const LLamaCreateInfo& data, 11 | const std::string& sysr = 12 | "You are LLama, a large language model trained by OpenSource. 
Respond conversationally."); 13 | 14 | ~LLama(); 15 | 16 | std::string Submit(std::string prompt, size_t timeStamp, std::string role, 17 | std::string convid, float temp, 18 | float top_p, 19 | uint32_t top_k, 20 | float pres_pen, 21 | float freq_pen, bool async) override; 22 | void Reset() override; 23 | void Load(std::string name) override; 24 | void Save(std::string name) override; 25 | void Del(std::string name) override; 26 | void Add(std::string name) override; 27 | map GetHistory() override; 28 | std::string sendRequest(std::string data, size_t ts) override; 29 | 30 | private: 31 | struct ChatMessage 32 | { 33 | std::string role; 34 | std::string content; 35 | 36 | llama_chat_message To(); 37 | }; 38 | 39 | 40 | struct chatInfo 41 | { 42 | std::vector messages; 43 | int prev_len = 0; 44 | 45 | std::vector To(); 46 | }; 47 | 48 | LLamaCreateInfo llamaData; 49 | llama_context* ctx; 50 | llama_model* model; 51 | llama_sampler* smpl; 52 | std::string convid_ = "default"; 53 | const llama_vocab* vocab; 54 | std::vector formatted; 55 | std::string sys = "You are LLama, a large language model trained by OpenSource. Respond conversationally."; 56 | const std::string ConversationPath = "Conversations/"; 57 | const std::string suffix = ".dat"; 58 | std::unordered_map history; 59 | 60 | static uint16_t GetGPUMemory(); 61 | 62 | static uint16_t GetGPULayer(); 63 | 64 | public: 65 | void BuildHistory(const std::vector>& history) override; 66 | 67 | std::string GetModel() override 68 | { 69 | return llamaData.model; 70 | } 71 | }; 72 | 73 | 74 | #endif //LLAMA_IMPL_H 75 | -------------------------------------------------------------------------------- /include/Logger.h: -------------------------------------------------------------------------------- 1 | #ifndef LOGGER_H 2 | #define LOGGER_H 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | class Logger { 9 | public: 10 | static void Init(); 11 | 12 | static std::shared_ptr &GetCoreLogger() { return s_CoreLogger; } //返回内核日志 13 | 14 | static long long getCurrentTimestamp() { 15 | auto now = std::chrono::system_clock::now(); 16 | auto duration = now.time_since_epoch(); 17 | return std::chrono::duration_cast(duration).count(); 18 | } 19 | 20 | static std::string Stamp2Time(size_t timestamp, bool nospace = false) { 21 | int ms = timestamp % 1000;//取毫秒 22 | time_t tick = (time_t) (timestamp / 1000);//转换时间 23 | struct tm tm; 24 | char s[40]; 25 | tm = *localtime(&tick); 26 | if (!nospace) { 27 | strftime(s, sizeof(s), "%Y-%m-%d %H:%M:%S", &tm); 28 | } else { 29 | strftime(s, sizeof(s), "%Y%m%d%H%M%S", &tm); 30 | } 31 | std::string str(s); 32 | str = str + " " + std::to_string(ms); 33 | return str; 34 | } 35 | 36 | private: 37 | inline static std::shared_ptr s_CoreLogger; //内核日志 38 | }; 39 | 40 | //日志 41 | #define LogTrace(...) SPDLOG_LOGGER_TRACE(::Logger::GetCoreLogger(), __VA_ARGS__) 42 | #define LogInfo(...) SPDLOG_LOGGER_INFO(::Logger::GetCoreLogger(), __VA_ARGS__) 43 | #define LogWarn(...) SPDLOG_LOGGER_WARN(::Logger::GetCoreLogger(), __VA_ARGS__) 44 | #define LogError(...) SPDLOG_LOGGER_ERROR(::Logger::GetCoreLogger(), __VA_ARGS__) 45 | #define LogFatal(...) 
SPDLOG_LOGGER_CRITICAL(::Logger::GetCoreLogger(), __VA_ARGS__) 46 | 47 | 48 | #endif -------------------------------------------------------------------------------- /include/Progress.hpp: -------------------------------------------------------------------------------- 1 | #ifndef PROGRESS_H 2 | #define PROGRESS_H 3 | 4 | #include "utils.h" 5 | 6 | class ProgressBar { 7 | public: 8 | ProgressBar(int total, int barWidth = 70, char symbol = '=') : total_(total), barWidth_(barWidth), symbol_(symbol) { 9 | startTime_ = std::chrono::high_resolution_clock::now(); 10 | } 11 | 12 | void update(int progress, std::string customData = "", std::string unit = "B") { 13 | double percentage = (double) progress / total_ * 100; 14 | auto currentTime = std::chrono::high_resolution_clock::now(); 15 | double elapsedTime = 16 | std::chrono::duration_cast(currentTime - startTime_).count() / 1000.0; 17 | double speed = progress / elapsedTime; 18 | std::stringstream ss; 19 | ss << std::setprecision(1) << std::fixed << percentage << "%|"; 20 | int pos = static_cast(barWidth_ * percentage / 100); 21 | for (int i = 0; i < barWidth_; ++i) { 22 | if (i < pos) ss << symbol_; 23 | else if (i == pos) ss << ">"; 24 | else ss << " "; 25 | } 26 | ss << "| speed:" << formatSize(speed) << "/s"; 27 | if (!customData.empty()) { 28 | ss << " " << customData; 29 | } 30 | std::cout << "\r" << ss.str() << std::flush; 31 | } 32 | 33 | void end() { 34 | std::cout << std::endl; 35 | } 36 | 37 | static std::string formatSize(double size) { 38 | std::string units[] = {"B", "KB", "MB", "GB", "TB"}; 39 | int unitIndex = 0; 40 | while (size >= 1024 && unitIndex < 4) { 41 | size /= 1024; 42 | unitIndex++; 43 | } 44 | std::stringstream ss; 45 | ss << std::setprecision(1) << std::fixed << size << units[unitIndex]; 46 | return ss.str(); 47 | } 48 | 49 | static std::string formatTime(double seconds) { 50 | int hours = static_cast(seconds / 3600); 51 | int minutes = static_cast(seconds / 60) % 60; 52 | int secs = static_cast(seconds) % 60; 53 | std::stringstream ss; 54 | if (hours > 0) { 55 | ss << std::setfill('0') << std::setw(2) << hours << ":"; 56 | } 57 | ss << std::setfill('0') << std::setw(2) << minutes << ":" << std::setfill('0') << std::setw(2) << secs; 58 | return ss.str(); 59 | } 60 | 61 | private: 62 | int total_; 63 | int barWidth_; 64 | char symbol_; 65 | std::chrono::time_point startTime_; 66 | 67 | 68 | }; 69 | 70 | #endif -------------------------------------------------------------------------------- /include/Recorder.h: -------------------------------------------------------------------------------- 1 | #ifndef RECORDER_H 2 | #define RECORDER_H 3 | 4 | #include "utils.h" 5 | 6 | class Recorder 7 | { 8 | public: 9 | const std::string filepath = "recorded.wav"; 10 | 11 | std::atomic silentTimer = 0; 12 | const std::atomic SILENT_TIMEOUT = 2000; 13 | 14 | Recorder(int sampleRate, int framesPerBuffer); 15 | 16 | ~Recorder(); 17 | 18 | void startRecording(); 19 | 20 | void stopRecording(bool del = false); 21 | 22 | std::vector getRecordedData() const; 23 | 24 | void saveToWav(const std::string& fileName = "recorded.wav"); 25 | 26 | void ChangeDevice(std::string device); 27 | 28 | int sampleRate; 29 | 30 | static int recordCallback(const void* inputBuffer, void* outputBuffer, unsigned long framesPerBuffer, 31 | const PaStreamCallbackTimeInfo* timeInfo, PaStreamCallbackFlags statusFlags, 32 | void* userData); 33 | 34 | std::vector Devices; 35 | 36 | private: 37 | int deviceId = -1; 38 | std::string device; 39 | static const int 
SAMPLE_RATE = 16000; 40 | int framesPerBuffer; 41 | PaStream* stream; 42 | 43 | std::vector recordedData; 44 | }; 45 | 46 | class Listener 47 | { 48 | public: 49 | Listener(int sampleRate, int framesPerBuffer); 50 | 51 | ~Listener(); 52 | 53 | void listen(std::string file = "recorded0.wav"); 54 | 55 | void EndListen(bool save = true); 56 | 57 | bool IsRecorded();; 58 | 59 | void ResetRecorded(); 60 | 61 | void playRecorded(const std::string& filePath, bool islisten = true); 62 | 63 | void changeFile(std::string filename); 64 | 65 | void ChangeDevice(std::string device); 66 | 67 | std::vector getRecordedData(); 68 | 69 | void ChangeTaskPath(std::string path); 70 | 71 | private: 72 | std::string taskPath; 73 | std::atomic isRecorded = false; 74 | bool run = true; 75 | std::shared_ptr recorder; 76 | std::vector recordedData; 77 | std::mutex recordedData_mutex; // 互斥锁 78 | friend int Recorder::recordCallback(const void* inputBuffer, void* outputBuffer, unsigned long framesPerBuffer, 79 | const PaStreamCallbackTimeInfo* timeInfo, PaStreamCallbackFlags statusFlags, 80 | void* userData); 81 | }; 82 | 83 | class Audio 84 | { 85 | public: 86 | static void playRecordedAudio(const std::vector& audioData); 87 | }; 88 | 89 | #endif 90 | -------------------------------------------------------------------------------- /include/Script.h: -------------------------------------------------------------------------------- 1 | #ifndef SCRIPTINTERPRETER_H 2 | #define SCRIPTINTERPRETER_H 3 | 4 | #include 5 | 6 | class Script { 7 | public: 8 | friend class ScriptManager; 9 | 10 | Script(); 11 | 12 | ~Script(); 13 | 14 | bool Initialize(std::string scriptsPath); 15 | 16 | sol::protected_function getFunction(const std::string&funcName); 17 | 18 | template 19 | sol::object Invoke(const std::string&funcName, Args&&... args); 20 | 21 | bool checkFunc(const std::string&funcName); 22 | 23 | void PrintAllFunctions(); 24 | 25 | static std::shared_ptr