├── .dockerignore ├── .env.exp ├── .github ├── CONTRIBUTING.md ├── ISSUE_TEMPLATE │ └── bug-report.md ├── release-drafter.yml ├── sponsor_ohmygpt.png └── workflows │ ├── docker-ci-dev.yml │ ├── docker-ci.yaml.hint │ ├── publish.yml │ ├── python_test.yml.ignore │ └── release_draft.yml ├── .gitignore ├── .nerve.toml ├── .pre-commit-config.yaml ├── CITATION.cff ├── Dockerfile ├── LICENSE ├── NOTICE.MD ├── README.md ├── app ├── __init__.py ├── _exception.py ├── components │ ├── __init__.py │ ├── credential.py │ └── user_manager │ │ └── __init__.py ├── const.py ├── middleware │ ├── __init__.py │ └── llm_task.py ├── receiver │ ├── __init__.py │ ├── app.py │ ├── aps.py │ ├── discord │ │ └── __init__.py │ ├── function.py │ ├── kook │ │ ├── __init__.py │ │ └── http_client.py │ ├── receiver_client.py │ ├── schema.py │ ├── slack │ │ ├── __init__.py │ │ └── creat_message.py │ ├── telegram │ │ └── __init__.py │ └── util_func.py ├── schema.py ├── sender │ ├── __init__.py │ ├── app.py │ ├── discord │ │ ├── __init__.py │ │ └── event.py │ ├── kook │ │ ├── __init__.py │ │ └── event.py │ ├── schema.py │ ├── slack │ │ ├── __init__.py │ │ ├── event.py │ │ └── schema.py │ ├── telegram │ │ ├── __init__.py │ │ └── event.py │ └── util_func.py ├── setting │ ├── __init__.py │ ├── database.py │ ├── discord.py │ ├── kook.py │ ├── rabbitmq.py │ ├── slack.py │ ├── telegram.py │ └── whitelist.py └── tutorial.py ├── deploy.sh ├── docker-compose.yml ├── docs ├── SeriveProvider.svg ├── chain_chat.gif ├── code_interpreter_func.gif ├── dev_note │ ├── chat_start.md │ ├── hook.md │ ├── time.md │ └── tool_call_restart.md ├── note │ ├── app.md │ ├── func_call.md │ ├── refer.md │ └── resign.md ├── project_cover.png ├── schema_exp │ └── openai_response.py ├── sticker_func.gif ├── test_script │ ├── bilibili.py │ ├── database │ │ ├── note_rabbit_connect.py │ │ ├── note_rabbit_pika_usage.py │ │ ├── note_rabbitmq_receiver.py │ │ └── note_redis_lpush_usage.py │ ├── discord.py │ ├── duckduck.py │ ├── exp_from_nonebot.py │ ├── fake_plugin │ │ └── __init__.py │ ├── func_call.py │ ├── funtion.py │ ├── inpoint.py │ ├── note_emoji_regex.py │ ├── note_entry_point.py │ ├── note_github_bot.py │ ├── note_github_bot_test.py │ ├── note_kook_usage.py │ ├── note_match_re.py │ ├── note_openai_req.py │ ├── note_pydantic_alias_usage.py │ ├── note_pydantic_class_or_subclass.py │ ├── note_rss_parser.py │ ├── pydantic_debug.py │ ├── pydantic_feat.py │ ├── pydantic_function.py │ ├── pydantic_mo.py │ ├── survey_arg_parse.py │ ├── survey_arg_parser.py │ ├── survey_chatglm_tokenzier.py │ ├── survey_khl_lib.py │ ├── test_choice_in_sh.sh │ ├── trash_cluster_usage.py │ ├── trash_mongodb_class.py │ ├── trash_transfer_note.py │ └── web_craw_note │ │ ├── note_unstructured.py │ │ └── note_web_sumy.py ├── timer_func.gif ├── translate_file_func.gif └── vision.gif ├── llmkira ├── __init__.py ├── _exception.py ├── cache │ ├── __init__.py │ ├── elara_runtime.py │ ├── lmdb_runtime.py │ ├── redis_runtime.py │ └── runtime_schema.py ├── doc_manager │ └── __init__.py ├── extra │ ├── plugins │ │ ├── __init__.py │ │ ├── alarm │ │ │ └── __init__.py │ │ ├── e2b_code_interpreter │ │ │ └── __init__.py │ │ └── search │ │ │ ├── __init__.py │ │ │ └── engine.py │ ├── voice │ │ └── __init__.py │ └── voice_hook.py ├── kv_manager │ ├── __init__.py │ ├── _base.py │ ├── env.py │ ├── file.py │ ├── instruction.py │ ├── time.py │ └── tool_call.py ├── logic │ └── __init__.py ├── memory │ ├── __init__.py │ ├── _base.py │ ├── local_storage.py │ └── redis_storage │ │ ├── LICENSE │ │ ├── __init__.py │ │ └── utils.py ├── openai │ ├── __init__.py │ ├── _excption.py │ ├── cell.py │ ├── request.py │ └── utils.py ├── openapi │ ├── __init__.py │ ├── fuse │ │ └── __init__.py │ ├── hook │ │ └── __init__.py │ └── trigger │ │ ├── __init__.py │ │ └── default_trigger.py ├── sdk │ ├── __init__.py │ ├── tools │ │ ├── LICENSE │ │ ├── __init__.py │ │ ├── error.py │ │ ├── loader.py │ │ ├── model.py │ │ ├── register.py │ │ └── schema.py │ └── utils.py └── task │ ├── __init__.py │ ├── schema.py │ └── snapshot │ ├── __init__.py │ ├── _base.py │ └── local.py ├── pdm.lock ├── playground ├── hooks.py ├── jsonf.py └── token.py ├── pm2.json ├── pyproject.toml ├── start.sh ├── start_receiver.py ├── start_sender.py ├── start_tutorial.py └── tests └── pydantic_error.py /.dockerignore: -------------------------------------------------------------------------------- 1 | .env 2 | test 3 | run.log 4 | docs 5 | .git 6 | /data/ 7 | /dist/ 8 | /mongodb/ 9 | /rabbitmq 10 | /redis/ 11 | /llmkira/redis/ 12 | /llmkira/mongodb/ 13 | /docs/test/ 14 | /jobs.sqlite 15 | /config_dir/*.pem 16 | /config_dir/*.secret/ -------------------------------------------------------------------------------- /.env.exp: -------------------------------------------------------------------------------- 1 | # NOTE:If you want to share your bot to **everyone** 2 | # GLOBAL_OAI_KEY=sk-xxx 3 | GLOBAL_OAI_MODEL=gpt-3.5-turbo 4 | GLOBAL_OAI_TOOL_MODEL=gpt-3.5-turbo 5 | # GLOBAL_OAI_ENDPOINT=https://api.openai.com/v1/ 6 | 7 | AMQP_DSN=amqp://admin:8a8a8a@localhost:5672/ 8 | 9 | # NOTE:NOT MUST,OR USE local file database # # # 10 | # REDIS_DSN=redis://localhost:6379/0 11 | # MONGODB_DSN=mongodb://admin:8a8a8a@localhost:27017/?authSource=admin 12 | # NOT MUST # # # 13 | 14 | # NOTE:When you have too much message, you can set it. 15 | # STOP_REPLY=anything 16 | 17 | # NOTE:sentry logger 18 | # SENTRY_DSN=xxxx 19 | 20 | # TELEGRAM_BOT_TOKEN=xxx 21 | # TELEGRAM_BOT_PROXY_ADDRESS=socks5://127.0.0.1:7890 22 | 23 | # DISCORD_BOT_TOKEN=xxx 24 | # DISCORD_BOT_PROXY_ADDRESS=socks5://127.0.0.1:7890 25 | # DISCORD_BOT_PREFIX=! 26 | 27 | # KOOK_BOT_TOKEN=xxx 28 | 29 | # SLACK_APP_TOKEN=xapp-*** 30 | # SLACK_BOT_TOKEN=xoxb-*** 31 | # SLACK_SIGNING_SECRET=xxxxxxxxxxxxx 32 | # SLACK_BOT_PROXY_ADDRESS=http:// 33 | 34 | # PLUGIN_* 35 | -------------------------------------------------------------------------------- /.github/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | [EN](#en) | [CN](#cn) 2 | ------------------- 3 | 4 | # [How to contribute](#en) 5 | 6 | We welcome everyone to contribute to the project. If you would like to contribute to the project, please read the 7 | following. 8 | 9 | ## Community Contact 10 | 11 | - Ask any issues directly on Github. 12 | - Join our Telegram group: https://t.me/Openai_LLM 13 | 14 | ## Branch description 15 | 16 | - Our `main` branch is the default release branch, please do not submit code directly to this branch. 17 | - Our `dev` branch is the development branch, if you want to contribute to the project, please submit code to this 18 | branch, we will merge the `dev` branch into the `main` branch regularly. 19 | - Our documentation is published at https://github.com/LlmKira/Docs and any form of contribution is accepted. 20 | - Our framework packages are published on pypi, changes to the sdk will only trigger Release CI when a new OpenAPI is 21 | released. 22 | 23 | ## CI/CD 24 | 25 | Our CI/CD service is run by `GitHub Actions`, and every commit of `dev` triggers the CI/CD process or manually triggered 26 | by the Release Manager. `main` branch consists of 27 | Release 28 | Manager or Manager triggered manually. 29 | 30 | ## Content specifications 31 | 32 | - Do not submit personal information. 33 | - Please use the PEP8 specification for naming. 34 | - The formatting operation is completed by Reviewer, so there is no need to worry about formatting issues. 35 | - Make sure all commits are atomic (one feature at a time). 36 | - We use pydantic>2.0.0 for data verification. You can submit the 1.0.0 version of the code (this is highly 37 | discouraged), but we will upgrade it to when released. 38 | 2.0.0. 39 | - Fixed Logger needs to be printed at the head or tail of the function of this layer, not at the calling statement 40 | level. Loggers that do not conform to the specifications need to be deleted after debugging. 41 | - The printed content of Logger is concise and clear. It starts with English capital letters and does not require 42 | punctuation at the end. Do not use `:` to separate, use `--` 43 | Separate parameters. 44 | - It is recommended to use `assert` in the function header for parameter verification, and do not use `if` for parameter 45 | verification. 46 | - If it is not a reading function, please throw an exception if the execution fails and do not return `None`. 47 | - Issues must be marked with `# TODO` or `# FIXME`, and add `# [Issue Number]` after `# TODO` or `# FIXME`, if there is 48 | no Issue 49 | Number, please create an Issue when submitting. 50 | - Please do not use `str | None` `:=` `list[dict]` and other new features. 51 | 52 | ## Compatibility instructions 53 | 54 | Our code needs to be compatible with Python 3.8+, please do not use new features such as `str | None` `:=` `list[dict]` 55 | and so on. 56 | Of course, if you want to use new features, we also welcome your contributions. Release Manager will do compatibility 57 | checks when releasing new versions. 58 | 59 | ## Add plugins to the registry 60 | 61 | If you want to add your plugin to the registry, please submit the `llmkira/external/plugin.py` file update, we will 62 | automatically 63 | synchronize to the registry and test through CI. 64 | 65 | --------------- 66 | 67 | # [如何贡献](#cn) 68 | 69 | 我们欢迎每一个人为项目做出贡献。如果你想要为项目做出贡献,请阅读以下内容。 70 | 71 | ## 社区联系 72 | 73 | - 直接在Github上提出任何问题。 74 | - 加入我们的Telegram群组:https://t.me/Openai_LLM 75 | 76 | ## 分支说明 77 | 78 | - 我们的 `main` 分支为默认发布分支,请勿向此分支直接提交代码。 79 | - 我们的 `dev` 分支是开发分支,如果你想要为项目做出贡献,请向此分支提交代码,我们会定期将 `dev` 分支合并到 `main` 分支。 80 | - 我们的文档发布在 https://github.com/LlmKira/Docs ,接受任意形式的贡献。 81 | - 我们的框架包发布于 pypi,对 sdk 的更改只会在发布新的 OpenAPI 时才会触发 Release CI。 82 | 83 | ## CI/CD 84 | 85 | 我们的 CI/CD 服务由 `GitHub Actions` 运行,`dev` 的每一次提交都会触发 CI/CD 流程或 Release Manager 手动触发。`main` 分支由 86 | Release 87 | Manager 或 Manager 手动触发。 88 | 89 | ## 内容规范 90 | 91 | - 不要提交个人信息。 92 | - 命名请使用 PEP8 规范。 93 | - 格式化操作由 Reviewer 完成,不需要担心格式化问题。 94 | - 确保所有提交都是原子的(每次提交一个功能)。 95 | - 我们使用 pydantic>2.0.0 来进行数据校验,你可以提交 1.0.0 版本的代码(非常不建议这样做),但是我们会在发布时将其升级到 96 | 2.0.0。 97 | - 固定式 Logger 需要在本层函数的头部或尾部进行打印,不要在调用语句层打印。调试后不合规范的 Logger 需要删除。 98 | - Logger 的打印内容简洁明了,使用英文大写字母开头,结尾不需要标点符号,不要使用 `:` 分隔,使用 `--` 99 | 分隔参数。 100 | - 推荐在函数头部使用 `assert` 进行参数校验,不要使用 `if` 进行参数校验。 101 | - 如果不是读取函数,执行失败请抛出异常,不要返回 `None`。 102 | - 问题必须注明 `# TODO` 或 `# FIXME`,并且在 `# TODO` 或 `# FIXME` 后面加上 `# [Issue Number]`,如果没有 Issue 103 | Number,请在提交时创建 Issue。 104 | - 请不要使用 `str | None` `:=` `list[dict]` 等新特性。 105 | 106 | - 错误示例 107 | 108 | ```python 109 | cache = global_cache_runtime.get_redis() 110 | if not cache: 111 | raise Exception("Redis not connected") 112 | ``` 113 | 114 | - 正确示例 115 | 116 | ```python 117 | cache = global_cache_runtime.get_redis() 118 | # cache 由函数 get_redis() 返回,raise Exception("Redis not connected") 会抛出异常,不需要返回 None 119 | ``` 120 | 121 | ```python 122 | cache = dict().get("cache") 123 | assert cache, "Redis not connected" 124 | ``` 125 | 126 | ## 兼容说明 127 | 128 | 我们的代码需要兼容 Python 3.8+,请不要使用 `str | None` `:=` `list[dict]` 等新特性。 129 | 当然如果你想要使用新特性,我们也欢迎你的贡献,Release Manager 会在发布新版本时做兼容性检查。 130 | 131 | ## 添加插件到注册表 132 | 133 | 如果你想要将你的插件添加到注册表,请提交 `llmkira/external/plugin.py` 文件更新,我们会自动通过 CI 同步到注册表并测试。 134 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug-report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug Report 3 | about: Create a report to help us improve 4 | title: "[BUG]" 5 | labels: Bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **Log** 14 | ``` 15 | ``` 16 | - [ ] **Secret in the Log has been deleted** 17 | 18 | **Info** 19 | Version: 20 | -------------------------------------------------------------------------------- /.github/release-drafter.yml: -------------------------------------------------------------------------------- 1 | name-template: 'Release v$RESOLVED_VERSION 🌈' 2 | tag-template: 'v$RESOLVED_VERSION' 3 | template: | 4 | $CHANGES 5 | categories: 6 | - title: '🚀 Features' 7 | labels: 8 | - 'New Features' 9 | - 'Support New Platform' 10 | - title: '🐛 Bug Fixes' 11 | labels: 12 | - 'fix' 13 | - 'bugfix' 14 | - 'Level High' 15 | - 'Bug' 16 | - 'Bugfix' 17 | - title: '🧰 Maintenance' 18 | labels: 19 | - 'Broken Change' 20 | - 'PluginApi' 21 | - 'Slow progress' 22 | - 'Improve our documentation' 23 | - title: '🎮 New Plugin' 24 | labels: 25 | - 'New plugin' 26 | 27 | change-template: '- $TITLE @$AUTHOR (#$NUMBER)' 28 | change-title-escapes: '\<*_&' 29 | version-resolver: 30 | major: 31 | labels: 32 | - 'major' 33 | minor: 34 | labels: 35 | - 'minor' 36 | patch: 37 | labels: 38 | - 'patch' 39 | default: patch 40 | -------------------------------------------------------------------------------- /.github/sponsor_ohmygpt.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LlmKira/Openaibot/f86521c8254a7d3143be70fdefb8e48b7a15407d/.github/sponsor_ohmygpt.png -------------------------------------------------------------------------------- /.github/workflows/docker-ci-dev.yml: -------------------------------------------------------------------------------- 1 | name: Docker Image CI (dev) 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | branches: 7 | - docker 8 | # - develop 9 | # - '**-develop' 10 | 11 | jobs: 12 | build-and-push-image: 13 | if: contains('["sudoskys"]', github.actor) 14 | runs-on: ubuntu-latest 15 | timeout-minutes: 60 16 | permissions: 17 | contents: read 18 | packages: write 19 | 20 | steps: 21 | - name: Checkout repository 22 | uses: actions/checkout@v3 23 | 24 | - name: Set up QEMU 25 | uses: docker/setup-qemu-action@v3 26 | - name: Set up Docker Buildx 27 | uses: docker/setup-buildx-action@v3 28 | 29 | - name: Log in to the Container registry 30 | uses: docker/login-action@v2.1.0 31 | with: 32 | username: ${{ secrets.DOCKER_USERNAME }} 33 | password: ${{ secrets.DOCKER_PASSWORD }} 34 | 35 | - name: Extract metadata (tags, labels) for Docker 36 | id: meta 37 | uses: docker/metadata-action@v4.1.1 38 | with: 39 | images: | 40 | sudoskys/llmbot 41 | tags: | 42 | # set latest tag for default branch 43 | type=raw,value=latest 44 | type=ref,event=branch 45 | 46 | flavors: | 47 | latest=false 48 | suffix=-dev,onlatest=true 49 | 50 | - name: echo 51 | run: echo ${{ steps.meta.outputs.json }} 52 | 53 | - name: Build and push Docker image 54 | uses: docker/build-push-action@v4 55 | with: 56 | platforms: linux/amd64,linux/arm64 57 | context: . 58 | push: true 59 | tags: ${{ steps.meta.outputs.tags }} 60 | labels: ${{ steps.meta.outputs.labels }} 61 | cache-from: type=registry,ref=${{ steps.meta.outputs.tags }} 62 | cache-to: type=inline 63 | -------------------------------------------------------------------------------- /.github/workflows/docker-ci.yaml.hint: -------------------------------------------------------------------------------- 1 | name: Docker Image CI (stable) 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | branches: 7 | - main 8 | tags: 9 | - '**' 10 | release: 11 | types: [ published ] 12 | 13 | jobs: 14 | build-and-push-image: 15 | if: contains('["sudoskys"]', github.actor) 16 | runs-on: ubuntu-latest 17 | timeout-minutes: 60 18 | permissions: 19 | contents: read 20 | packages: write 21 | 22 | steps: 23 | - name: Checkout repository 24 | uses: actions/checkout@v3 25 | 26 | - name: Set up QEMU 27 | uses: docker/setup-qemu-action@v3 28 | - name: Set up Docker Buildx 29 | uses: docker/setup-buildx-action@v3 30 | 31 | - name: Log in to the Container registry 32 | uses: docker/login-action@v2.1.0 33 | with: 34 | username: ${{ secrets.DOCKER_USERNAME }} 35 | password: ${{ secrets.DOCKER_PASSWORD }} 36 | 37 | - name: Extract metadata (tags, labels) for Docker 38 | id: meta 39 | uses: docker/metadata-action@v4.1.1 40 | with: 41 | images: | 42 | sudoskys/llmbot 43 | tags: | 44 | # set latest tag for default branch 45 | type=raw,value=latest 46 | type=ref,event=tag 47 | type=ref,event=branch 48 | 49 | flavors: | 50 | latest=auto 51 | suffix=-stable,onlatest=true 52 | 53 | - name: echo 54 | run: echo ${{ steps.meta.outputs.json }} 55 | 56 | - name: Build and push Docker image 57 | uses: docker/build-push-action@v4 58 | with: 59 | platforms: linux/amd64,linux/arm64 60 | context: . 61 | push: true 62 | tags: ${{ steps.meta.outputs.tags }} 63 | labels: ${{ steps.meta.outputs.labels }} 64 | cache-from: type=registry,ref=${{ steps.meta.outputs.tags }} 65 | cache-to: type=inline 66 | -------------------------------------------------------------------------------- /.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | name: publish 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | tags: 7 | - pypi_* 8 | 9 | permissions: 10 | contents: read 11 | 12 | jobs: 13 | pypi-publish: 14 | name: upload release to PyPI 15 | runs-on: ubuntu-latest 16 | permissions: 17 | # IMPORTANT: this permission is mandatory for trusted publishing 18 | id-token: write 19 | steps: 20 | - uses: actions/checkout@v3 21 | 22 | - uses: pdm-project/setup-pdm@v3 23 | 24 | - name: Publish package distributions to PyPI 25 | run: pdm publish 26 | -------------------------------------------------------------------------------- /.github/workflows/python_test.yml.ignore: -------------------------------------------------------------------------------- 1 | name: Run tests 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | branches: 7 | - main 8 | - dev 9 | - develop 10 | - '**-develop' 11 | 12 | jobs: 13 | Testing: 14 | runs-on: ${{ matrix.os }} 15 | strategy: 16 | matrix: 17 | python-version: [ '3.9', '3.10', '3.11' ] 18 | os: [ ubuntu-latest ] # , windows-latest, macos-latest ] 19 | 20 | steps: 21 | - uses: actions/checkout@v3 22 | - name: Set up PDM 23 | uses: pdm-project/setup-pdm@v3 24 | with: 25 | python-version: ${{ matrix.python-version }} 26 | 27 | - name: Install dependencies 28 | run: | 29 | pdm install --frozen-lockfile -G bot 30 | - name: Run Tests 31 | run: | 32 | pdm run -v pytest tests 33 | -------------------------------------------------------------------------------- /.github/workflows/release_draft.yml: -------------------------------------------------------------------------------- 1 | name: release-draft 2 | 3 | on: 4 | pull_request_target: 5 | branches: 6 | - main 7 | types: 8 | - closed 9 | 10 | permissions: 11 | contents: read 12 | 13 | jobs: 14 | draft_release: 15 | if: github.event_name == 'pull_request_target' 16 | runs-on: ubuntu-latest 17 | permissions: 18 | contents: write 19 | pull-requests: write 20 | steps: 21 | - uses: release-drafter/release-drafter@v5 22 | id: release-drafter 23 | env: 24 | GITHUB_TOKEN: ${{ steps.generate-token.outputs.token }} -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | Config/*.toml 2 | .idea 3 | *.log 4 | # Byte-compiled / optimized / DLL files 5 | __pycache__/ 6 | *.py[cod] 7 | *$py.class 8 | 9 | # C extensions 10 | *.so 11 | /llmkira/redis/ 12 | # Distribution / packaging 13 | .Python 14 | build/ 15 | develop-eggs/ 16 | dist/ 17 | downloads/ 18 | eggs/ 19 | .eggs/ 20 | lib/ 21 | lib64/ 22 | parts/ 23 | sdist/ 24 | var/ 25 | wheels/ 26 | pip-wheel-metadata/ 27 | share/python-wheels/ 28 | *.egg-info/ 29 | .installed.cfg 30 | *.egg 31 | MANIFEST 32 | 33 | # PyInstaller 34 | # Usually these files are written by a python script from a template 35 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 36 | *.manifest 37 | *.spec 38 | 39 | # Installer logs 40 | pip-log.txt 41 | pip-delete-this-directory.txt 42 | 43 | # Unit test / coverage reports 44 | htmlcov/ 45 | .tox/ 46 | .nox/ 47 | .coverage 48 | .coverage.* 49 | .cache 50 | nosetests.xml 51 | coverage.xml 52 | *.cover 53 | *.py,cover 54 | .hypothesis/ 55 | .pytest_cache/ 56 | 57 | # Translations 58 | *.mo 59 | *.pot 60 | 61 | # Django stuff: 62 | local_settings.py 63 | db.sqlite3 64 | db.sqlite3-journal 65 | 66 | # Flask stuff: 67 | instance/ 68 | .webassets-cache 69 | 70 | # Scrapy stuff: 71 | .scrapy 72 | 73 | # Sphinx documentation 74 | docs/_build/ 75 | 76 | # PyBuilder 77 | target/ 78 | 79 | # Jupyter Notebook 80 | .ipynb_checkpoints 81 | 82 | # IPython 83 | profile_default/ 84 | ipython_config.py 85 | 86 | # pyenv 87 | .python-version 88 | 89 | # pipenv 90 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 91 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 92 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 93 | # install all needed dependencies. 94 | #Pipfile.lock 95 | 96 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 97 | __pypackages__/ 98 | 99 | # Celery stuff 100 | celerybeat-schedule 101 | celerybeat.pid 102 | 103 | # SageMath parsed files 104 | *.sage.py 105 | 106 | # Environments 107 | .env 108 | .venv 109 | env/ 110 | venv/ 111 | ENV/ 112 | env.bak/ 113 | venv.bak/ 114 | 115 | # Spyder project provider_settings 116 | .spyderproject 117 | .spyproject 118 | 119 | # Rope project provider_settings 120 | .ropeproject 121 | 122 | # mkdocs documentation 123 | /site 124 | 125 | # mypy 126 | .mypy_cache/ 127 | .dmypy.json 128 | dmypy.json 129 | 130 | # Pyre type checker 131 | .pyre/ 132 | /data/ 133 | /mongodb/ 134 | /redis/ 135 | 136 | # Ignore dynaconf secret files 137 | .secrets.* 138 | /rabbitmq/ 139 | /llmkira/elara.db 140 | config_dir/*.toml 141 | config_dir/*.db 142 | /memray-*.bin 143 | /memray-*.html 144 | /docs/test_script/sticker/ 145 | /jobs.sqlite 146 | config_dir/*.pem 147 | config_dir/*.secret/ 148 | /.pdm-python 149 | /.montydb/ 150 | /.snapshot/ 151 | /.tutorial.db 152 | app/.tutorial.db 153 | -------------------------------------------------------------------------------- /.nerve.toml: -------------------------------------------------------------------------------- 1 | contributor = "b4e0cd99-289b-4586-992c-6f159c436101" 2 | # https://github.com/LlmKira/contributor/blob/main/.nerve.toml 3 | 4 | language = "English" 5 | issue_auto_label = true 6 | # Whether to automatically label issues 7 | issue_title_format = true 8 | # Whether to use the default issue title format 9 | issue_body_format = false 10 | # Whether to use the default issue body format 11 | issue_close_with_report = true 12 | # Whether to close the issue with a report 13 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/pre-commit/pre-commit-hooks 3 | rev: v3.2.0 4 | hooks: 5 | - id: trailing-whitespace 6 | - id: end-of-file-fixer 7 | - id: check-yaml 8 | - id: check-added-large-files 9 | 10 | - repo: https://github.com/astral-sh/ruff-pre-commit 11 | # Ruff version. 12 | rev: v0.1.7 13 | hooks: 14 | # Run the linter. 15 | - id: ruff 16 | args: [ --fix ] 17 | # Run the formatter. 18 | - id: ruff-format 19 | -------------------------------------------------------------------------------- /CITATION.cff: -------------------------------------------------------------------------------- 1 | cff-version: 1.2.0 2 | message: "If you use this software, please cite it as below." 3 | authors: 4 | - family-names: "sudoskys" 5 | title: "Openaibot" 6 | version: 2.0.4 7 | url: "https://github.com/LlmKira/Openaibot" 8 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # 第一个阶段 2 | FROM python:3.9-buster as builder 3 | 4 | RUN apt update && \ 5 | apt install -y build-essential && \ 6 | pip install -U pip setuptools wheel && \ 7 | pip install pdm && \ 8 | apt install -y ffmpeg 9 | 10 | COPY pyproject.toml pdm.lock README.md /project/ 11 | WORKDIR /project 12 | RUN pdm sync -G bot --prod --no-editable 13 | 14 | # 第二个阶段 15 | FROM python:3.9-slim-buster as runtime 16 | 17 | RUN apt update && \ 18 | apt install -y npm && \ 19 | npm install pm2 -g && \ 20 | apt install -y ffmpeg && \ 21 | pip install pdm 22 | 23 | VOLUME ["/redis", "/rabbitmq", "/mongodb", "/run.log", ".cache",".montydb",".snapshot"] 24 | 25 | WORKDIR /app 26 | COPY --from=builder /project/.venv /app/.venv 27 | 28 | COPY pm2.json ./ 29 | COPY . /app 30 | 31 | CMD [ "pm2-runtime", "pm2.json" ] 32 | -------------------------------------------------------------------------------- /NOTICE.MD: -------------------------------------------------------------------------------- 1 | # NOTICE OF THIS PROJECT 2 | 3 | ``` 4 | Copyright (C) 2023 sudoskys 5 | 6 | This program is free software: you can redistribute it and/or modify 7 | it under the terms of the GNU General Public License as published by 8 | the Free Software Foundation, either version 3 of the License, or 9 | (at your option) any later version. 10 | 11 | This program is distributed in the hope that it will be useful, 12 | but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 | GNU General Public License for more details. 15 | 16 | You should have received a copy of the GNU General Public License 17 | along with this program. If not, see . 18 | ``` 19 | 20 | ## MIT License 21 | 22 | The MIT license applies to the files in: 23 | 24 | - folder: "llmkira/sdk/func_calling" from https://github.com/nonebot/nonebot2 25 | - folder: "llmkira/sdk/memory/redis" from https://github.com/langchain-ai/langchain 26 | - code: "llmkira/sdk/schema.py@parse_from_pydantic" from https://github.com/jxnl/instructor 27 | -------------------------------------------------------------------------------- /app/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LlmKira/Openaibot/f86521c8254a7d3143be70fdefb8e48b7a15407d/app/__init__.py -------------------------------------------------------------------------------- /app/_exception.py: -------------------------------------------------------------------------------- 1 | class ChainBuildException(Exception): 2 | pass 3 | -------------------------------------------------------------------------------- /app/components/__init__.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | 3 | from app.components.credential import Credential 4 | from app.components.user_manager import USER_MANAGER 5 | 6 | 7 | async def read_user_credential(user_id: str) -> Optional[Credential]: 8 | user = await USER_MANAGER.read(user_id=user_id) 9 | return user.credential 10 | -------------------------------------------------------------------------------- /app/components/credential.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import requests 4 | from dotenv import load_dotenv 5 | from loguru import logger 6 | from pydantic import BaseModel 7 | 8 | 9 | class ProviderError(Exception): 10 | pass 11 | 12 | 13 | class Credential(BaseModel): 14 | api_key: str 15 | api_endpoint: str 16 | api_model: str 17 | api_tool_model: str = "gpt-4o-mini" 18 | 19 | @classmethod 20 | def from_provider(cls, token, provider_url): 21 | """ 22 | 使用 token POST 请求 provider_url 获取用户信息 23 | :param token: 用户 token 24 | :param provider_url: provider url 25 | :return: 用户信息 26 | :raises HTTPError: 请求失败 27 | :raises JSONDecodeError: 返回数据解析失败 28 | :raises ProviderError: provider 返回错误信息 29 | """ 30 | response = requests.post(provider_url, data={"token": token}) 31 | response.raise_for_status() 32 | user_data = response.json() 33 | if user_data.get("error"): 34 | raise ProviderError(user_data["error"]) 35 | return cls( 36 | api_key=user_data["api_key"], 37 | api_endpoint=user_data["api_endpoint"], 38 | api_model=user_data["api_model"], 39 | api_tool_model=user_data.get("api_tool_model", "gpt-4o-mini"), 40 | ) 41 | 42 | 43 | load_dotenv() 44 | 45 | if os.getenv("GLOBAL_OAI_KEY") and os.getenv("GLOBAL_OAI_ENDPOINT"): 46 | logger.warning("\n\n**Using GLOBAL credential**\n\n") 47 | global_credential = Credential( 48 | api_key=os.getenv("GLOBAL_OAI_KEY"), 49 | api_endpoint=os.getenv("GLOBAL_OAI_ENDPOINT"), 50 | api_model=os.getenv("GLOBAL_OAI_MODEL", "gpt-4o-mini"), 51 | api_tool_model=os.getenv("GLOBAL_OAI_TOOL_MODEL", "gpt-4o-mini"), 52 | ) 53 | else: 54 | global_credential = None 55 | -------------------------------------------------------------------------------- /app/components/user_manager/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2024/2/8 下午10:56 3 | # @Author : sudoskys 4 | # @File : __init__.py.py 5 | # @Software: PyCharm 6 | import time 7 | from typing import Optional 8 | 9 | from loguru import logger 10 | from pydantic import BaseModel 11 | 12 | from app.components.credential import Credential 13 | from app.const import DBNAME 14 | from llmkira.doc_manager import global_doc_client 15 | 16 | 17 | class ChatCost(BaseModel): 18 | user_id: str 19 | cost_token: int = 0 20 | endpoint: str = "" 21 | cost_model: str = "" 22 | produce_time: int = time.time() 23 | 24 | 25 | class GenerateHistory(object): 26 | def __init__(self, db_name: str = DBNAME, collection: str = "cost_history"): 27 | """ """ 28 | self.client = global_doc_client.update_db_collection( 29 | db_name=db_name, collection_name=collection 30 | ) 31 | 32 | async def save(self, history: ChatCost): 33 | return self.client.insert_one(history.model_dump(mode="json")) 34 | 35 | 36 | class User(BaseModel): 37 | user_id: str 38 | last_use_time: int = time.time() 39 | credential: Optional[Credential] = None 40 | 41 | 42 | class UserManager(object): 43 | def __init__(self, db_name: str = DBNAME, collection: str = "user"): 44 | """ """ 45 | self.client = global_doc_client.update_db_collection( 46 | db_name=db_name, collection_name=collection 47 | ) 48 | 49 | async def read(self, user_id: str) -> User: 50 | user_id = str(user_id) 51 | database_read = self.client.find_one({"user_id": user_id}) 52 | if not database_read: 53 | logger.info(f"Create new user: {user_id}") 54 | return User(user_id=user_id) 55 | # database_read.update({"user_id": user_id}) 56 | return User.model_validate(database_read) 57 | 58 | async def save(self, user_model: User): 59 | user_model = user_model.model_copy(update={"last_use_time": int(time.time())}) 60 | # 如果存在记录则更新 61 | if self.client.find_one({"user_id": user_model.user_id}): 62 | return self.client.update_one( 63 | {"user_id": user_model.user_id}, 64 | {"$set": user_model.model_dump(mode="json")}, 65 | ) 66 | # 如果不存在记录则插入 67 | else: 68 | return self.client.insert_one(user_model.model_dump(mode="json")) 69 | 70 | 71 | COST_MANAGER = GenerateHistory() 72 | USER_MANAGER = UserManager() 73 | 74 | 75 | async def record_cost( 76 | user_id: str, cost_token: int, endpoint: str, cost_model: str, success: bool = True 77 | ): 78 | try: 79 | await COST_MANAGER.save( 80 | ChatCost( 81 | user_id=user_id, 82 | produce_time=int(time.time()), 83 | endpoint=endpoint, 84 | cost_model=cost_model, 85 | cost_token=cost_token if success else 0, 86 | ) 87 | ) 88 | except Exception as exc: 89 | logger.error(f"🔥 record_cost error: {exc}") 90 | 91 | 92 | if __name__ == "__main__": 93 | pass 94 | -------------------------------------------------------------------------------- /app/const.py: -------------------------------------------------------------------------------- 1 | DBNAME = "openai_bot:" 2 | -------------------------------------------------------------------------------- /app/middleware/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/8/21 上午12:12 3 | # @Author : sudoskys 4 | # @File : __init__.py.py 5 | # @Software: PyCharm 6 | -------------------------------------------------------------------------------- /app/receiver/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/8/17 下午8:37 3 | # @Author : sudoskys 4 | # @File : __init__.py.py 5 | # @Software: PyCharm 6 | from dotenv import load_dotenv 7 | 8 | load_dotenv() 9 | -------------------------------------------------------------------------------- /app/receiver/app.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/8/17 下午8:38 3 | from dotenv import load_dotenv 4 | from loguru import logger 5 | 6 | load_dotenv() 7 | 8 | __area__ = "receiver" 9 | 10 | 11 | # import nest_asyncio 12 | # nest_asyncio.apply() 13 | 14 | 15 | def run(): 16 | import asyncio 17 | from .aps import aps_start 18 | from .function import FunctionReceiver 19 | from llmkira import ( 20 | load_plugins, 21 | load_from_entrypoint, 22 | get_entrypoint_plugins, 23 | ) 24 | from app.setting import PlatformSetting 25 | 26 | start_setting = PlatformSetting.from_subdir() 27 | func = [ 28 | aps_start(), 29 | FunctionReceiver().function(), 30 | ] 31 | 32 | if start_setting.telegram: 33 | from .telegram import TelegramReceiver 34 | 35 | func.append(TelegramReceiver().telegram()) 36 | if start_setting.discord: 37 | from .discord import DiscordReceiver 38 | 39 | func.append(DiscordReceiver().discord()) 40 | if start_setting.kook: 41 | from .kook import KookReceiver 42 | 43 | func.append(KookReceiver().kook()) 44 | if start_setting.slack: 45 | from .slack import SlackReceiver 46 | 47 | func.append(SlackReceiver().slack()) 48 | 49 | async def _main(_func): 50 | await asyncio.gather(*_func) 51 | 52 | # 导入插件 53 | load_plugins("llmkira/extra/plugins") 54 | load_from_entrypoint("llmkira.extra.plugin") 55 | import llmkira.extra.voice_hook # noqa 56 | 57 | loaded_message = "\n >>".join(get_entrypoint_plugins()) 58 | logger.success( 59 | f"\n===========Third Party Plugins Loaded==========\n >>{loaded_message}" 60 | ) 61 | loop = asyncio.get_event_loop() 62 | loop.run_until_complete(_main(func)) 63 | -------------------------------------------------------------------------------- /app/receiver/aps.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/8/18 下午8:34 3 | # @Author : sudoskys 4 | # @File : aps.py 5 | # @Software: PyCharm 6 | from pathlib import Path 7 | 8 | from apscheduler.executors.pool import ThreadPoolExecutor 9 | from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore 10 | from apscheduler.schedulers.asyncio import AsyncIOScheduler 11 | from loguru import logger 12 | import pytz 13 | from tzlocal import get_localzone 14 | timezone = pytz.timezone(get_localzone().key) 15 | Path(".cache").mkdir(exist_ok=True) 16 | jobstores = {"default": SQLAlchemyJobStore(url="sqlite:///.cache/aps.db")} 17 | executors = {"default": ThreadPoolExecutor(20)} 18 | job_defaults = {"coalesce": False, "max_instances": 3} 19 | 20 | SCHEDULER = AsyncIOScheduler( 21 | job_defaults=job_defaults, timezone=timezone, executors=executors, jobstores=jobstores 22 | ) 23 | 24 | 25 | async def aps_start(): 26 | logger.success("Receiver Runtime:APS Timer start") 27 | SCHEDULER.start() 28 | -------------------------------------------------------------------------------- /app/receiver/kook/http_client.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/10/20 下午5:33 3 | # @Author : sudoskys 4 | # @File : http_client.py 5 | # @Software: PyCharm 6 | import requests 7 | 8 | 9 | class KookHttpClient(object): 10 | def __init__(self, token): 11 | self.base_url = "https://www.kookapp.cn" 12 | self.bot_token = token 13 | 14 | def request(self, method, url, data=None): 15 | headers = {"Authorization": f"Bot {self.bot_token}"} 16 | response = requests.request( 17 | method, f"{self.base_url}{url}", headers=headers, json=data 18 | ) 19 | return response.json() 20 | 21 | def create_channel_message(self, target_id, content, quote=None): 22 | data = {"type": 1, "target_id": target_id, "content": content, "quote": quote} 23 | return self.request("POST", "/api/v3/message/create", data) 24 | 25 | def create_direct_message(self, target_id, content, quote=None): 26 | data = {"type": 1, "target_id": target_id, "content": content, "quote": quote} 27 | return self.request("POST", "/api/v3/direct-message/create", data) 28 | -------------------------------------------------------------------------------- /app/receiver/schema.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/10/17 下午10:02 3 | # @Author : sudoskys 4 | # @File : schema.py 5 | # @Software: PyCharm 6 | -------------------------------------------------------------------------------- /app/receiver/slack/creat_message.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/10/21 下午8:02 3 | # @Author : sudoskys 4 | # @File : creat_message.py 5 | # @Software: PyCharm 6 | from enum import Enum 7 | from typing import Union 8 | 9 | import emoji 10 | 11 | 12 | class SlackEmoji(Enum): 13 | robot = ":robot_face:" 14 | check = ":white_check_mark:" 15 | error = ":x:" 16 | pin = ":pushpin:" 17 | thumbs_up = ":thumbsup:" 18 | thumbs_down = ":thumbsdown:" 19 | eyes = ":eyes:" 20 | gear = ":gear:" 21 | pencil = ":pencil:" 22 | moai = ":moai:" 23 | telescope = ":telescope:" 24 | hammer = ":hammer:" 25 | warning = ":warning:" 26 | 27 | 28 | class ChatMessageCreator(object): 29 | WELCOME_BLOCK = { 30 | "type": "section", 31 | "text": { 32 | "type": "mrkdwn", 33 | "text": ( 34 | "Welcome to Slack! :wave: We're so glad you're here. :blush:\n\n" 35 | "*Get started by completing the steps below:*" 36 | ), 37 | }, 38 | } 39 | DIVIDER_BLOCK = {"type": "divider"} 40 | 41 | def __init__( 42 | self, 43 | channel, 44 | user_name: str = None, 45 | thread_ts: str = None, 46 | ): 47 | self.channel = channel 48 | self.username = user_name if user_name else "OAIbot" 49 | self.icon_emoji = ":robot_face:" 50 | self.timestamp = "" 51 | self.thread_ts = thread_ts 52 | self.blocks = [] 53 | 54 | @staticmethod 55 | def build_block(text, msg_type: str = "section"): 56 | """ 57 | create a section block 58 | """ 59 | _block = { 60 | "type": msg_type, 61 | "text": { 62 | "type": "mrkdwn", 63 | "text": text, 64 | }, 65 | } 66 | return _block 67 | 68 | def update_content(self, message_text): 69 | """ 70 | Message text in markdown format 71 | """ 72 | self.blocks.append(self.build_block(message_text)) 73 | return self 74 | 75 | def update_emoji(self, emoji_name: Union[str, SlackEmoji]): 76 | if isinstance(emoji_name, SlackEmoji): 77 | self.icon_emoji = emoji_name.value 78 | return self 79 | emoji_name = emoji_name.strip() 80 | if ":" in emoji_name: 81 | self.icon_emoji = emoji_name 82 | else: 83 | _emoji = emoji.demojize(emoji_name) 84 | if _emoji.endswith(":"): 85 | self.icon_emoji = _emoji 86 | return self 87 | 88 | def get_message_payload(self, message_text=None): 89 | if not self.blocks: 90 | raise ValueError("Message cannot be empty") 91 | _arg = { 92 | "ts": self.timestamp, 93 | "channel": self.channel, 94 | "username": self.username, 95 | "icon_emoji": self.icon_emoji, 96 | "blocks": self.blocks, 97 | } 98 | if self.thread_ts: 99 | _arg["thread_ts"] = self.thread_ts 100 | if message_text: 101 | _arg["text"] = message_text 102 | return _arg 103 | -------------------------------------------------------------------------------- /app/receiver/util_func.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/10/17 下午10:02 3 | # @Author : sudoskys 4 | # @File : util_func.py 5 | # @Software: PyCharm 6 | -------------------------------------------------------------------------------- /app/schema.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | from pydantic import Field, BaseModel, ConfigDict 4 | 5 | 6 | class Event(BaseModel): 7 | thead_uuid: str = Field(description="Thead UUID") 8 | by_platform: str = Field(description="创建平台") 9 | by_user: str = Field(description="创建用户") 10 | expire_at: int = Field(default=60 * 60 * 24 * 1, description="expire") 11 | create_at: int = Field( 12 | default_factory=lambda: int(time.time()), description="created_times" 13 | ) 14 | model_config = ConfigDict(extra="ignore", arbitrary_types_allowed=False) 15 | -------------------------------------------------------------------------------- /app/sender/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/8/17 下午8:27 3 | # @Author : sudoskys 4 | # @File : __init__.py.py 5 | # @Software: PyCharm 6 | 7 | from dotenv import load_dotenv 8 | 9 | load_dotenv() 10 | -------------------------------------------------------------------------------- /app/sender/app.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/8/17 下午8:18 3 | # @Author : sudoskys 4 | # @File : app.py 5 | # @Software: PyCharm 6 | 7 | from dotenv import load_dotenv 8 | from loguru import logger 9 | 10 | from llmkira import load_from_entrypoint, get_entrypoint_plugins 11 | 12 | load_dotenv() 13 | __area__ = "sender" 14 | 15 | # import nest_asyncio 16 | # nest_asyncio.apply() 17 | 18 | 19 | def run(): 20 | import asyncio 21 | 22 | from llmkira import load_plugins 23 | from app.setting import PlatformSetting 24 | 25 | start_setting = PlatformSetting.from_subdir() 26 | wait_list = [] 27 | if start_setting.telegram: 28 | from .telegram import TelegramBotRunner 29 | 30 | wait_list.append(TelegramBotRunner().run()) 31 | if start_setting.discord: 32 | from .discord import DiscordBotRunner 33 | 34 | wait_list.append(DiscordBotRunner().run()) 35 | if start_setting.kook: 36 | from .kook import KookBotRunner 37 | 38 | wait_list.append(KookBotRunner().run()) 39 | if start_setting.slack: 40 | from .slack import SlackBotRunner 41 | 42 | wait_list.append(SlackBotRunner().run()) 43 | 44 | # 初始化插件系统 45 | load_plugins("llmkira/extra/plugins") 46 | load_from_entrypoint("llmkira.extra.plugin") 47 | loaded_message = "\n >>".join(get_entrypoint_plugins()) 48 | logger.success( 49 | f"\n===========Third Party Plugins Loaded==========\n >>{loaded_message}" 50 | ) 51 | 52 | async def _main(wait_list_): 53 | await asyncio.gather(*wait_list_) 54 | 55 | loop = asyncio.get_event_loop() 56 | loop.run_until_complete(_main(wait_list_=wait_list)) 57 | -------------------------------------------------------------------------------- /app/sender/discord/event.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/10/19 下午6:30 3 | # @Author : sudoskys 4 | # @File : event.py 5 | # @Software: PyCharm 6 | from app.setting.discord import BotSetting 7 | 8 | 9 | def help_message(): 10 | return """ 11 | `{prefix}chat` - Chat with me :) 12 | `{prefix}task` - Ask me do things with `func_enable` 13 | 14 | **Slash Command** 15 | `/help` - **You just did it :)** 16 | `/tool` - Check all useful tools 17 | `/clear` - wipe memory of your chat 18 | `/auth` - activate a task (my power) 19 | `/login` - set credential 20 | `/logout` - clear credential 21 | `/login_via_url` - login via url 22 | `/env` - set environment variable, split by ; , use `/env ENV=NONE` to disable a env. 23 | `/learn` - set your system prompt, reset by `/learn reset` 24 | 25 | **Please confirm that that bot instance is secure, some plugins may be dangerous on unsafe instance.** 26 | """.format(prefix=BotSetting.prefix) 27 | -------------------------------------------------------------------------------- /app/sender/kook/event.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/10/19 下午6:30 3 | # @Author : sudoskys 4 | # @File : event.py 5 | # @Software: PyCharm 6 | from app.setting.discord import BotSetting 7 | 8 | _upload_error_message_template = [ 9 | "I cant upload file {filename} to server {error}", 10 | "we cant upload {filename}:( , because {error}...", 11 | "Seems like you're having a bit of a problem uploading {filename}\n`{error}`", 12 | "just cant upload {filename} to server {error} 💍", 13 | "I dont know why, but I cant upload {filename} to server {error}", 14 | ":( I dont want to upload {filename} to server\n `{error}`", 15 | "{error}, {filename} 404", 16 | "OMG, {filename} ,ERROR UPLOAD, `{error}`", 17 | "WTF, I CANT UPLOAD {filename} BECAUSE `{error}`", 18 | "MY PHONE IS BROKEN, I CANT UPLOAD {filename} BECAUSE `{error}`", 19 | "As a human, I can't upload {filename} for you :( \n `{error}`", 20 | ] 21 | 22 | 23 | class MappingDefault(dict): 24 | def __missing__(self, key): 25 | return key 26 | 27 | 28 | def help_message(): 29 | return """ 30 | `{prefix}chat` - Chat with me :) 31 | `{prefix}task` - Ask me do things with `func_enable` 32 | 33 | **Slash Command** 34 | `/help` - **You just did it :)** 35 | `/tool` - Check all useful tools 36 | `/clear` - wipe memory of your chat 37 | `/auth` - activate a task (my power) 38 | `/login` - set credential 39 | `/logout` - clear credential 40 | `/login_via_url` - login via provider url 41 | `/env` - set environment variable, split by ; , use `/env ENV=NONE` to disable a env. 42 | `/learn` - set your system prompt, reset by `/learn reset` 43 | 44 | **Please confirm that that bot instance is secure, some plugins may be dangerous on unsafe instance.** 45 | """.format(prefix=BotSetting.prefix) 46 | -------------------------------------------------------------------------------- /app/sender/schema.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/10/17 下午10:02 3 | # @Author : sudoskys 4 | # @File : schema.py 5 | # @Software: PyCharm 6 | from abc import abstractmethod, ABC 7 | from typing import List 8 | 9 | from llmkira.openapi.hook import run_hook, Trigger 10 | from llmkira.task.schema import EventMessage, Sign 11 | 12 | 13 | class Runner(ABC): 14 | @staticmethod 15 | async def hook(platform: str, messages: List[EventMessage], sign: Sign) -> tuple: 16 | """ 17 | :param platform: 平台 18 | :param messages: 消息 19 | :param sign: 签名 20 | :return: 平台,消息,文件列表 21 | """ 22 | arg, kwarg = await run_hook( 23 | Trigger.SENDER, 24 | platform=platform, 25 | messages=messages, 26 | sign=sign, 27 | ) 28 | platform = kwarg.get("platform", platform) 29 | messages = kwarg.get("messages", messages) 30 | sign = kwarg.get("sign", sign) 31 | return platform, messages, sign 32 | 33 | @abstractmethod 34 | async def upload(self, *args, **kwargs): 35 | ... 36 | 37 | async def transcribe(self, *args, **kwargs) -> List[EventMessage]: 38 | ... 39 | 40 | @abstractmethod 41 | def run(self, *args, **kwargs): 42 | ... 43 | -------------------------------------------------------------------------------- /app/sender/slack/event.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/10/21 下午2:29 3 | # @Author : sudoskys 4 | # @File : event.py 5 | # @Software: PyCharm 6 | 7 | from pydantic import ConfigDict, BaseModel 8 | 9 | 10 | def help_message(): 11 | return """ 12 | *Command* 13 | 14 | !chat - chat with me in a serious way :( 15 | @ - Chat with me in a simple way :) 16 | !task - chat with function_enable 17 | !ask - chat with function_disable 18 | !auth - Auth a task 19 | 20 | *Slash Command* 21 | 22 | `/help` - Help message 23 | `/tool` - Tool list 24 | `/clear` - forget...you 25 | `/auth` - activate a task (my power),but outside the thread 26 | `/login` - login via url or raw 27 | `/logout` - clear credential 28 | `/env` - set environment variable, split by ; , use `/env ENV=NONE` to disable a env. 29 | `/learn` - set your system prompt, reset by `/learn reset` 30 | 31 | Make sure you invite me before you call me in channel, wink~ 32 | 33 | *DONT SHARE YOUR TOKEN/ENDPOINT PUBLIC!* 34 | 35 | - Please confirm that that bot instance is secure, some plugins may be dangerous on unsafe instance. 36 | """ 37 | 38 | 39 | class SlashCommand(BaseModel): 40 | """ 41 | https://api.slack.com/interactivity/slash-commands#app_command_handling 42 | token=gIkuvaNzQIHg97ATvDxqgjtO 43 | &team_id=T0001 44 | &team_domain=example 45 | &enterprise_id=E0001 46 | &enterprise_name=Globular%20Construct%20Inc 47 | &channel_id=C2147483705 48 | &channel_name=test 49 | &user_id=U2147483697 50 | &user_name=Steve 51 | &command=/weather 52 | &text=94070 53 | &response_url=https://hooks.slack.com/commands/1234/5678 54 | &trigger_id=13345224609.738474920.8088930838d88f008e0 55 | &api_app_id=A123456 56 | """ 57 | 58 | token: str = None 59 | team_id: str = None 60 | team_domain: str = None 61 | enterprise_id: str = None 62 | enterprise_name: str = None 63 | channel_id: str = None 64 | channel_name: str = None 65 | user_id: str = None 66 | user_name: str = None 67 | command: str = None 68 | text: str = None 69 | response_url: str = None 70 | trigger_id: str = None 71 | api_app_id: str = None 72 | model_config = ConfigDict(extra="allow") 73 | 74 | 75 | class SlackChannelInfo(BaseModel): 76 | id: str = None 77 | name: str = None 78 | is_channel: bool = None 79 | is_group: bool = None 80 | is_im: bool = None 81 | is_mpim: bool = None 82 | is_private: bool = None 83 | created: int = None 84 | is_archived: bool = None 85 | is_general: bool = None 86 | unlinked: int = None 87 | name_normalized: str = None 88 | is_shared: bool = None 89 | is_org_shared: bool = None 90 | is_pending_ext_shared: bool = None 91 | pending_shared: list = None 92 | context_team_id: str = None 93 | updated: int = None 94 | parent_conversation: str = None 95 | creator: str = None 96 | is_ext_shared: bool = None 97 | shared_team_ids: list = None 98 | pending_connected_team_ids: list = None 99 | is_member: bool = None 100 | last_read: str = None 101 | topic: dict = None 102 | purpose: dict = None 103 | previous_names: list = None 104 | model_config = ConfigDict(extra="allow") 105 | -------------------------------------------------------------------------------- /app/sender/slack/schema.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/11/9 下午2:05 3 | # @Author : sudoskys 4 | # @File : schema.py 5 | # @Software: PyCharm 6 | from typing import List, Optional 7 | 8 | from pydantic import ConfigDict, BaseModel, Field 9 | 10 | 11 | class SlackFile(BaseModel): 12 | id: Optional[str] = Field(None, description="id") 13 | created: int = Field(None, description="created") 14 | timestamp: int = Field(None, description="timestamp") 15 | name: Optional[str] = Field(None, description="name") 16 | title: Optional[str] = Field(None, description="title") 17 | mimetype: Optional[str] = Field(None, description="mimetype") 18 | filetype: Optional[str] = Field(None, description="filetype") 19 | pretty_type: Optional[str] = Field(None, description="pretty_type") 20 | user: Optional[str] = Field(None, description="user") 21 | user_team: Optional[str] = Field(None, description="user_team") 22 | editable: bool = Field(None, description="editable") 23 | size: int = Field(None, description="size") 24 | mode: Optional[str] = Field(None, description="mode") 25 | is_external: bool = Field(None, description="is_external") 26 | external_type: Optional[str] = Field(None, description="external_type") 27 | is_public: bool = Field(None, description="is_public") 28 | public_url_shared: bool = Field(None, description="public_url_shared") 29 | display_as_bot: bool = Field(None, description="display_as_bot") 30 | username: Optional[str] = Field(None, description="username") 31 | url_private: Optional[str] = Field(None, description="url_private") 32 | url_private_download: Optional[str] = Field( 33 | None, description="url_private_download" 34 | ) 35 | media_display_type: Optional[str] = Field(None, description="media_display_type") 36 | thumb_64: Optional[str] = Field(None, description="thumb_64") 37 | thumb_80: Optional[str] = Field(None, description="thumb_80") 38 | thumb_360: Optional[str] = Field(None, description="thumb_360") 39 | thumb_360_w: int = Field(None, description="thumb_360_w") 40 | thumb_360_h: int = Field(None, description="thumb_360_h") 41 | thumb_160: Optional[str] = Field(None, description="thumb_160") 42 | original_w: int = Field(None, description="original_w") 43 | original_h: int = Field(None, description="original_h") 44 | thumb_tiny: Optional[str] = Field(None, description="thumb_tiny") 45 | permalink: Optional[str] = Field(None, description="permalink") 46 | permalink_public: Optional[str] = Field(None, description="permalink_public") 47 | has_rich_preview: bool = Field(None, description="has_rich_preview") 48 | file_access: Optional[str] = Field(None, description="file_access") 49 | 50 | 51 | class SlackMessageEvent(BaseModel): 52 | """ 53 | https://api.slack.com/events/message.im 54 | """ 55 | 56 | client_msg_id: Optional[str] = Field(None, description="client_msg_id") 57 | type: Optional[str] = Field(None, description="type") 58 | text: Optional[str] = Field(None, description="text") 59 | user: Optional[str] = Field(None, description="user") 60 | ts: Optional[str] = Field(None, description="ts") 61 | blocks: List[dict] = Field([], description="blocks") 62 | team: Optional[str] = Field(None, description="team") 63 | thread_ts: Optional[str] = Field(None, description="thread_ts") 64 | parent_user_id: Optional[str] = Field(None, description="parent_user_id") 65 | channel: Optional[str] = Field(None, description="channel") 66 | event_ts: Optional[str] = Field(None, description="event_ts") 67 | channel_type: Optional[str] = Field(None, description="channel_type") 68 | files: List[SlackFile] = Field(default=[], description="files") 69 | model_config = ConfigDict(arbitrary_types_allowed=True, extra="allow") 70 | -------------------------------------------------------------------------------- /app/sender/telegram/event.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/7/10 下午9:40 3 | # @Author : sudoskys 4 | # @File : event.py 5 | # @Software: PyCharm 6 | 7 | 8 | def help_message(): 9 | return """ 10 | # Command List 11 | 12 | `/help` - show help message 13 | `/chat` - just want to chat with me 14 | `/task` - chat with function_enable 15 | `/ask` - chat with function_disable 16 | `/tool` - check all useful tools 17 | `/clear` - clear the chat history 18 | `/auth` - auth the tool_call 19 | `/learn` - set your system prompt, reset by `/learn reset` 20 | 21 | **Private Chat Only** 22 | 23 | `/login` - login via url or something 24 | `/logout` - clear credential 25 | `/env` - set v-env split by ; , use `/env ENV=NONE` to disable a env. 26 | 27 | > Please confirm that that bot instance is secure, some plugins may be dangerous on unsafe instance, wink~ 28 | """ 29 | -------------------------------------------------------------------------------- /app/setting/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/8/18 上午12:49 3 | # @Author : sudoskys 4 | # @File : __init__.py.py 5 | # @Software: PyCharm 6 | from pydantic import ConfigDict, BaseModel 7 | 8 | from .discord import BotSetting as DiscordSetting 9 | from .kook import BotSetting as KookSetting 10 | from .slack import BotSetting as SlackSetting 11 | from .telegram import BotSetting as TelegramSetting 12 | 13 | 14 | class PlatformSetting(BaseModel): 15 | """ 16 | 平台列表 17 | """ 18 | 19 | discord: bool = False 20 | kook: bool = False 21 | slack: bool = False 22 | telegram: bool = False 23 | model_config = ConfigDict(from_attributes=True) 24 | 25 | @classmethod 26 | def from_subdir(cls): 27 | return cls( 28 | discord=DiscordSetting.available, 29 | kook=KookSetting.available, 30 | slack=SlackSetting.available, 31 | telegram=TelegramSetting.available, 32 | ) 33 | -------------------------------------------------------------------------------- /app/setting/database.py: -------------------------------------------------------------------------------- 1 | from dotenv import load_dotenv 2 | from httpx import AsyncClient 3 | from loguru import logger 4 | from pydantic import Field, model_validator 5 | from pydantic_settings import BaseSettings, SettingsConfigDict 6 | 7 | from llmkira.sdk.utils import sync 8 | 9 | global_httpx_client = AsyncClient(timeout=180) 10 | 11 | 12 | class RabbitMQ(BaseSettings): 13 | """ 14 | 代理设置 15 | """ 16 | 17 | amqp_dsn: str = Field( 18 | "amqp://admin:8a8a8a@localhost:5672", validation_alias="AMQP_DSN" 19 | ) 20 | """RabbitMQ 配置""" 21 | 22 | model_config = SettingsConfigDict( 23 | env_file=".env", env_file_encoding="utf-8", extra="ignore" 24 | ) 25 | 26 | @model_validator(mode="after") 27 | def is_connect(self): 28 | from aio_pika import connect_robust 29 | 30 | try: 31 | sync(connect_robust(url=self.amqp_dsn)) 32 | except Exception as e: 33 | logger.exception( 34 | f"\n⚠️ RabbitMQ DISCONNECT, pls set AMQP_DSN in .env\n--error {e} \n--dsn {self.amqp_dsn}" 35 | ) 36 | raise e 37 | else: 38 | logger.success("🍩 RabbitMQ Connect Success") 39 | if self.amqp_dsn == "amqp://admin:8a8a8a@localhost:5672": 40 | logger.warning( 41 | "\n⚠️ You Are Using The Default RabbitMQ Password" 42 | "\nMake Sure You Handle The Port `5672` And Set Firewall Rules" 43 | ) 44 | return self 45 | 46 | @property 47 | def task_server(self): 48 | return self.amqp_dsn 49 | 50 | 51 | load_dotenv() 52 | RabbitMQSetting = RabbitMQ() 53 | -------------------------------------------------------------------------------- /app/setting/discord.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/10/18 下午10:23 3 | # @Author : sudoskys 4 | # @File : discord.py 5 | # @Software: PyCharm 6 | from typing import Optional 7 | 8 | from dotenv import load_dotenv 9 | from loguru import logger 10 | from pydantic import Field, model_validator 11 | from pydantic_settings import BaseSettings, SettingsConfigDict 12 | 13 | 14 | class DiscordBot(BaseSettings): 15 | """ 16 | 代理设置 17 | """ 18 | 19 | token: Optional[str] = Field( 20 | None, validation_alias="DISCORD_BOT_TOKEN", strict=True 21 | ) 22 | prefix: Optional[str] = Field("/", validation_alias="DISCORD_BOT_PREFIX") 23 | proxy_address: Optional[str] = Field( 24 | None, validation_alias="DISCORD_BOT_PROXY_ADDRESS" 25 | ) # "all://127.0.0.1:7890" 26 | bot_id: Optional[str] = Field(None) 27 | model_config = SettingsConfigDict( 28 | env_file=".env", env_file_encoding="utf-8", extra="ignore" 29 | ) 30 | 31 | @model_validator(mode="after") 32 | def bot_setting_validator(self): 33 | if self.token is None: 34 | logger.info("🍀Discord Bot Token Not Set") 35 | if self.proxy_address: 36 | logger.success(f"DiscordBot proxy was set to {self.proxy_address}") 37 | return self 38 | 39 | @property 40 | def available(self): 41 | return self.token is not None 42 | 43 | 44 | load_dotenv() 45 | BotSetting = DiscordBot() 46 | -------------------------------------------------------------------------------- /app/setting/kook.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/10/20 下午5:59 3 | # @Author : sudoskys 4 | # @File : kook.py 5 | # @Software: PyCharm 6 | from typing import Optional 7 | 8 | from dotenv import load_dotenv 9 | from loguru import logger 10 | from pydantic import Field, model_validator 11 | from pydantic_settings import BaseSettings, SettingsConfigDict 12 | 13 | 14 | class KookBot(BaseSettings): 15 | """ 16 | 代理设置 17 | """ 18 | 19 | token: Optional[str] = Field(None, validation_alias="KOOK_BOT_TOKEN") 20 | model_config = SettingsConfigDict( 21 | env_file=".env", env_file_encoding="utf-8", extra="ignore" 22 | ) 23 | 24 | @model_validator(mode="after") 25 | def bot_setting_validator(self): 26 | if self.token is None: 27 | logger.info("🍀Kook Bot Token Not Set") 28 | return self 29 | 30 | @property 31 | def available(self): 32 | return self.token is not None 33 | 34 | 35 | load_dotenv() 36 | BotSetting = KookBot() 37 | -------------------------------------------------------------------------------- /app/setting/rabbitmq.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/11/14 上午11:39 3 | # @Author : sudoskys 4 | # @File : rabbitmq.py 5 | # @Software: PyCharm 6 | from dotenv import load_dotenv 7 | from loguru import logger 8 | from pydantic import Field, PrivateAttr, model_validator 9 | from pydantic_settings import BaseSettings, SettingsConfigDict 10 | 11 | from llmkira.sdk.utils import sync 12 | 13 | 14 | class RabbitMQ(BaseSettings): 15 | """ 16 | 代理设置 17 | """ 18 | 19 | amqp_dsn: str = Field( 20 | "amqp://admin:8a8a8a@localhost:5672", validation_alias="AMQP_DSN" 21 | ) 22 | _verify_status: bool = PrivateAttr(default=False) 23 | model_config = SettingsConfigDict( 24 | env_file=".env", env_file_encoding="utf-8", extra="ignore" 25 | ) 26 | 27 | @model_validator(mode="after") 28 | def is_connect(self): 29 | import aio_pika 30 | 31 | try: 32 | sync(aio_pika.connect_robust(self.amqp_dsn)) 33 | except Exception as e: 34 | self._verify_status = False 35 | logger.exception( 36 | f"\n⚠️ RabbitMQ DISCONNECT, pls set AMQP_DSN in .env\n--error {e} \n--dsn {self.amqp_dsn}" 37 | ) 38 | raise e 39 | else: 40 | self._verify_status = True 41 | logger.success("🍩 RabbitMQ Connect Success") 42 | if self.amqp_dsn == "amqp://admin:8a8a8a@localhost:5672": 43 | logger.warning( 44 | "\n⚠️ You Are Using The Default RabbitMQ Password" 45 | "\nMake Sure You Handle The Port `5672` And Set Firewall Rules" 46 | ) 47 | return self 48 | 49 | @property 50 | def available(self): 51 | return self._verify_status 52 | 53 | @property 54 | def task_server(self): 55 | return self.amqp_dsn 56 | 57 | 58 | load_dotenv() 59 | RabbitMQSetting = RabbitMQ() 60 | -------------------------------------------------------------------------------- /app/setting/slack.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/10/21 下午2:14 3 | # @Author : sudoskys 4 | # @File : slack.py 5 | # @Software: PyCharm 6 | from typing import Optional 7 | 8 | from dotenv import load_dotenv 9 | from loguru import logger 10 | from pydantic import Field, model_validator 11 | from pydantic_settings import BaseSettings, SettingsConfigDict 12 | 13 | 14 | class SlackBot(BaseSettings): 15 | """ 16 | 代理设置 17 | """ 18 | 19 | app_token: Optional[str] = Field(None, validation_alias="SLACK_APP_TOKEN") 20 | # https://api.slack.com/apps 21 | 22 | bot_token: Optional[str] = Field(None, validation_alias="SLACK_BOT_TOKEN") 23 | # https://api.slack.com/apps/XXXX/oauth? 24 | 25 | secret: Optional[str] = Field(None, validation_alias="SLACK_SIGNING_SECRET") 26 | # https://api.slack.com/authentication/verifying-requests-from-slack#signing_secrets_admin_page 27 | 28 | proxy_address: Optional[str] = Field( 29 | None, validation_alias="SLACK_BOT_PROXY_ADDRESS" 30 | ) # "all://127.0.0.1:7890" 31 | bot_id: Optional[str] = Field(None) 32 | bot_username: Optional[str] = Field(None) 33 | model_config = SettingsConfigDict( 34 | env_file=".env", env_file_encoding="utf-8", extra="ignore" 35 | ) 36 | 37 | @model_validator(mode="after") 38 | def bot_setting_validator(self): 39 | try: 40 | if self.app_token is None: 41 | raise ValueError("🍀SlackBot `app_token` Not Set") 42 | if self.bot_token is None: 43 | raise LookupError("\n🍀SlackBot `bot_token` is empty") 44 | if self.secret is None: 45 | raise LookupError("\n🍀SlackBot `secret` is empty") 46 | except ValueError as e: 47 | logger.info(str(e)) 48 | except LookupError as e: 49 | logger.warning(str(e)) 50 | return self 51 | 52 | @property 53 | def available(self): 54 | return all([self.app_token, self.bot_token, self.secret]) 55 | 56 | 57 | load_dotenv() 58 | BotSetting = SlackBot() 59 | -------------------------------------------------------------------------------- /app/setting/telegram.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/8/17 下午8:22 3 | # @Author : sudoskys 4 | # @File : telegram.py 5 | # @Software: PyCharm 6 | from typing import Optional 7 | 8 | from dotenv import load_dotenv 9 | from loguru import logger 10 | from pydantic import Field, model_validator 11 | from pydantic_settings import BaseSettings, SettingsConfigDict 12 | 13 | 14 | class TelegramBot(BaseSettings): 15 | """ 16 | 代理设置 17 | """ 18 | 19 | token: Optional[str] = Field(None, validation_alias="TELEGRAM_BOT_TOKEN") 20 | proxy_address: Optional[str] = Field( 21 | None, validation_alias="TELEGRAM_BOT_PROXY_ADDRESS" 22 | ) # "all://127.0.0.1:7890" 23 | bot_link: Optional[str] = Field(None, validation_alias="TELEGRAM_BOT_LINK") 24 | bot_id: Optional[str] = Field(None, validation_alias="TELEGRAM_BOT_ID") 25 | bot_username: Optional[str] = Field(None, validation_alias="TELEGRAM_BOT_USERNAME") 26 | model_config = SettingsConfigDict( 27 | env_file=".env", env_file_encoding="utf-8", extra="ignore" 28 | ) 29 | 30 | @model_validator(mode="after") 31 | def bot_validator(self): 32 | if self.proxy_address: 33 | logger.success(f"TelegramBot proxy was set to {self.proxy_address}") 34 | if self.token is None: 35 | logger.info("\n🍀Check:Telegrambot token is empty") 36 | if self.bot_id is None and self.token: 37 | try: 38 | from telebot import TeleBot 39 | 40 | # 创建 Bot 41 | if self.proxy_address is not None: 42 | from telebot import apihelper 43 | 44 | if "socks5://" in self.proxy_address: 45 | self.proxy_address = self.proxy_address.replace( 46 | "socks5://", "socks5h://" 47 | ) 48 | apihelper.proxy = {"https": self.proxy_address} 49 | _bot = TeleBot(token=self.token).get_me() 50 | self.bot_id = str(_bot.id) 51 | self.bot_username = _bot.username 52 | self.bot_link = f"https://t.me/{self.bot_username}" 53 | except Exception as e: 54 | logger.error(f"\n🍀TelegramBot Token Not Set --error {e}") 55 | else: 56 | logger.success( 57 | f"🍀TelegramBot Init Connection Success --bot_name {self.bot_username} --bot_id {self.bot_id}" 58 | ) 59 | return self 60 | 61 | @property 62 | def available(self): 63 | return self.token is not None 64 | 65 | 66 | load_dotenv() 67 | BotSetting = TelegramBot() 68 | -------------------------------------------------------------------------------- /app/setting/whitelist.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LlmKira/Openaibot/f86521c8254a7d3143be70fdefb8e48b7a15407d/app/setting/whitelist.py -------------------------------------------------------------------------------- /app/tutorial.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/10/28 下午4:19 3 | # @Author : sudoskys 4 | # @File : tutorial.py 5 | # @Software: PyCharm 6 | import pathlib 7 | from time import sleep 8 | 9 | import elara 10 | from rich.console import Console 11 | 12 | elara_client = elara.exe( 13 | path=pathlib.Path(__file__).parent / ".tutorial.db", commitdb=True 14 | ) 15 | 16 | tutorial = [ 17 | { 18 | "cn": "接下来进行一些说明,如果您不想看到这些说明,请使用 --no-tutorial 参数运行入口文件。", 19 | "en": "Next, some instructions will be given. " 20 | "If you don’t want to see these instructions, please run the entry file with the --no-tutorial flag.", 21 | }, 22 | { 23 | "cn": "请您在 .env 文件中填写您的配置信息。" 24 | "您可以通过 `docker-compose up -f docker-compose.yml` 来测试服务。", 25 | "en": "Please fill in your configuration information in the .env file. " 26 | "You can test the service by `docker-compose up -f docker-compose.yml`.", 27 | }, 28 | { 29 | "cn": "数据库 RabbitMQ 的默认端口为 5672,Redis 的默认端口为 6379,MongoDB 的默认端口为 27017。" 30 | "请您考虑是否需要添加防火墙配置。其中,RabbitMQ 和 MongoDB 均使用默认配置了密码。请查看 .env 文件进行修改。", 31 | "en": "The default port of the database RabbitMQ is 5672, " 32 | "the default port of Redis is 6379, and the default port of MongoDB is 27017." 33 | "Please consider whether to add firewall configuration. " 34 | "Among them, RabbitMQ and MongoDB use the default configuration password. " 35 | "Please check the .env file for modification.", 36 | }, 37 | { 38 | "cn": "请当心您的 .env 文件,其中包含了您的敏感信息。请不要将 .env 文件上传到公共仓库。", 39 | "en": "Please be careful with your .env file, " 40 | "which contains your sensitive information. " 41 | "Please do not upload the .env file to the public repository.", 42 | }, 43 | { 44 | "cn": "请当心您的 日志文件,其中包含了您的敏感信息。请不要将 日志文件上传到公共仓库。", 45 | "en": "Please be careful with your log file, which contains your sensitive information. " 46 | "Please do not upload the log file to the public repository.", 47 | }, 48 | { 49 | "cn": "如果您在使用过程中遇到了问题,可以在 GitHub 上提出 issue 来完善测试。", 50 | "en": "If you encounter any problems during use, you can raise an issue on GitHub to improve the test.", 51 | }, 52 | ] 53 | tutorial_len = len(tutorial) 54 | 55 | 56 | def show_tutorial( 57 | skip_existing: bool = False, pre_step_stop: int = 5, database_key: str = "55123" 58 | ): 59 | global tutorial, elara_client, tutorial_len 60 | lens = elara_client.get(database_key) 61 | if skip_existing and str(lens) == str(len(tutorial)): 62 | return None 63 | # 截取未读的条目 64 | tutorial = tutorial[lens:] if skip_existing else tutorial 65 | console = Console() 66 | print("\n") 67 | with console.status("[bold green]Working on tasks...[/bold green]") as status: 68 | index = 0 69 | while tutorial: 70 | info = tutorial.pop(0) 71 | index += 1 72 | console.print(info["cn"], style="bold cyan") 73 | console.print(info["en"], style="bold green", end="\n\n") 74 | for i in range(pre_step_stop): 75 | status.update( 76 | f"[bold green]({index}/{tutorial_len})Remaining {pre_step_stop - i} " 77 | f"seconds to next info... [/bold green] " 78 | ) 79 | sleep(1) 80 | 81 | # 更新进度 82 | elara_client.set(database_key, tutorial_len) 83 | sleep(3) 84 | -------------------------------------------------------------------------------- /deploy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | echo "Beginning the setup process..." 3 | 4 | # Install Voice dependencies 5 | echo "Installing Voice dependencies..." 6 | apt install ffmpeg 7 | 8 | # Pull RabbitMQ 9 | echo "Pulling RabbitMQ..." 10 | docker pull rabbitmq:3.10-management 11 | 12 | # Check if RabbitMQ container exists 13 | if [ "$(docker ps -a -f name=rabbitmq | grep rabbitmq | wc -l)" -eq 0 ]; then 14 | # Run the RabbitMQ if not exist 15 | echo "Running RabbitMQ..." 16 | docker run -d -p 5672:5672 -p 15672:15672 \ 17 | -e RABBITMQ_DEFAULT_USER=admin \ 18 | -e RABBITMQ_DEFAULT_PASS=8a8a8a \ 19 | --hostname myRabbit \ 20 | --name rabbitmq \ 21 | rabbitmq:3.10-management 22 | else 23 | echo "RabbitMQ already exists. Using it..." 24 | fi 25 | 26 | docker ps -l 27 | 28 | # Clone or update the project 29 | if [ ! -d "Openaibot" ] ; then 30 | echo "Cloning Openaibot..." 31 | git clone https://github.com/LlmKira/Openaibot/ 32 | cd Openaibot || exit 33 | else 34 | echo "Updating Openaibot..." 35 | cd Openaibot || exit 36 | git pull 37 | fi 38 | 39 | echo "Setting up Python dependencies..." 40 | pip install pdm 41 | pdm install -G bot 42 | cp .env.exp .env && nano .env 43 | 44 | # Install or update pm2 45 | if ! [ -x "$(command -v pm2)" ]; then 46 | echo "Installing npm and pm2..." 47 | apt install npm 48 | npm install pm2 -g 49 | fi 50 | 51 | echo "Starting application with pm2..." 52 | pm2 start pm2.json 53 | 54 | echo "Setup complete!" 55 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.3' 2 | 3 | services: 4 | llmbot: 5 | image: sudoskys/llmbot:main 6 | container_name: llmbot 7 | volumes: 8 | # 挂载配置目录 config_dir 9 | - ./config_dir:/app/config_dir 10 | deploy: 11 | resources: 12 | limits: 13 | # 保证至多有 0.90 个 CPU 和 4000MB 内存 14 | cpus: '0.90' 15 | memory: 4000M 16 | reservations: 17 | memory: 1000M 18 | depends_on: 19 | - redis 20 | - rabbitmq 21 | - mongodb 22 | environment: 23 | AMQP_DSN: amqp://admin:8a8a8a@rabbitmq:5672 24 | REDIS_DSN: redis://redis:6379/0 25 | MONGODB_DSN: mongodb://admin:8a8a8a@mongodb:27017/?authSource=admin 26 | env_file: 27 | - .env 28 | networks: 29 | - app-tier 30 | logging: 31 | driver: "json-file" 32 | options: 33 | max-size: "50m" 34 | max-file: "3" 35 | 36 | rabbitmq: 37 | image: rabbitmq:3-management 38 | container_name: rabbit 39 | hostname: rabbitmq 40 | restart: on-failure 41 | ports: 42 | - "15672:15672" 43 | - "5672:5672" 44 | environment: 45 | TZ: Asia/Shanghai 46 | RABBITMQ_DEFAULT_USER: admin 47 | RABBITMQ_DEFAULT_PASS: 8a8a8a 48 | volumes: 49 | - ./rabbitmq:/var/lib/rabbitmq 50 | networks: 51 | - app-tier 52 | logging: 53 | driver: "json-file" 54 | options: 55 | max-size: "10m" 56 | max-file: "3" 57 | redis: 58 | container_name: redis-server 59 | image: redis:7 60 | restart: on-failure 61 | environment: 62 | - TZ=Asia/Shanghai 63 | volumes: 64 | - ./redis:/data 65 | ports: 66 | - "6379:6379" 67 | networks: 68 | - app-tier 69 | logging: 70 | driver: "json-file" 71 | options: 72 | max-size: "10m" 73 | max-file: "3" 74 | 75 | mongodb: 76 | container_name: mongodb-server 77 | image: mongo:7 78 | restart: on-failure 79 | environment: 80 | - TZ=Asia/Shanghai 81 | - MONGO_INITDB_ROOT_USERNAME=admin 82 | - MONGO_INITDB_ROOT_PASSWORD=8a8a8a 83 | volumes: 84 | - ./mongodb:/data/db 85 | ports: 86 | - "27017:27017" 87 | networks: 88 | - app-tier 89 | logging: 90 | driver: "json-file" 91 | options: 92 | max-size: "10m" 93 | max-file: "3" 94 | 95 | networks: 96 | app-tier: 97 | driver: bridge 98 | -------------------------------------------------------------------------------- /docs/chain_chat.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LlmKira/Openaibot/f86521c8254a7d3143be70fdefb8e48b7a15407d/docs/chain_chat.gif -------------------------------------------------------------------------------- /docs/code_interpreter_func.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LlmKira/Openaibot/f86521c8254a7d3143be70fdefb8e48b7a15407d/docs/code_interpreter_func.gif -------------------------------------------------------------------------------- /docs/dev_note/chat_start.md: -------------------------------------------------------------------------------- 1 | 2 | ## 时钟中断 3 | 4 | 一问多答已经实现了。那么更仿真的对话方式是在循环中时钟中断,每次中断时,投入多组消息。这样就可以实现多组消息的对话了。 5 | -------------------------------------------------------------------------------- /docs/dev_note/hook.md: -------------------------------------------------------------------------------- 1 | ## 钩子 2 | 3 | author@sudoskys 4 | 5 | 由于现阶段的 v1 OpenAI API 没有媒体支持,所以我们需要自己实现一个钩子来渲染媒体数据。 6 | 7 | 当消息传入传出的时候,可以先通过钩子处理消息,然后再传递给 ChatGPT。来实现媒体数据的渲染。 8 | -------------------------------------------------------------------------------- /docs/dev_note/time.md: -------------------------------------------------------------------------------- 1 | # 聊天分片 2 | 3 | author@sudoskys 4 | 5 | ## 时间池化技术 6 | 7 | 人的谈话有一定的主题。在时间上相互关联。 8 | 因此机器人可以在一段时间后直接舍弃历史消息。通过检索检索一些历史消息,最仿生。 9 | 10 | 难点不在于如何切分时间片,而是如何判断当前聊天与历史聊天的关联性。 11 | 12 | ### 解决方案假设 13 | 14 | #### 古典搜索+机械匹配 15 | 16 | - 对每个片段进行主题提取,然后相似性排序。 17 | - 对每个片段进行关键词抽取,然后相似性排序。 18 | 19 | #### 古典搜索+LLM自选函数 20 | 21 | 通过 Toolcall 选择匹配的关键词。 22 | 23 | #### 向量匹配+LLM结构化导出 24 | 25 | 向量数据库,然后通过LLM结构化导出主题。检索主题即可。 26 | 27 | ===== 28 | 29 | 不管是哪种方案,都很麻烦,在实时性强的系统里,对性能额外要求。向量匹配只能放在后台处理。 30 | 31 | #### 投入 Openai 生态 32 | 33 | Openai 有一个很好的方案,向量匹配,检索主题。代价是兼容性(其他模型)。 34 | -------------------------------------------------------------------------------- /docs/note/func_call.md: -------------------------------------------------------------------------------- 1 | # 关于 Function Call 2 | 3 | ## 1. 样板代码 4 | 5 | ```python 6 | from typing import Literal 7 | 8 | 9 | def get_current_weather(location: str, format: Literal["fahrenheit", "celsius"]): 10 | """ 11 | Get the current weather 12 | :param location: The city and state, e.g. San Francisco, CA 13 | :param format: The temperature unit to use. Infer this from the users location. 14 | """ 15 | ``` 16 | 17 | ### Pydantic 数据模型 18 | 19 | pydantic 的数据模型,不完全兼容 openapi 20 | 21 | ```json5 22 | 23 | { 24 | 'title': 'Search', 25 | 'description': '测试搜索类型', 26 | 'type': 'object', 27 | 'properties': { 28 | 'keywords': { 29 | 'title': 'Keywords', 30 | 'description': '关键词', 31 | 'type': 'string' 32 | }, 33 | 'text': { 34 | 'title': 'Text', 35 | 'description': '文本', 36 | 'type': 'string' 37 | } 38 | } 39 | } 40 | 41 | ``` 42 | 43 | 这点 langchain 相关开发有提到 44 | https://github.com/minimaxir/simpleaichat/issues/40 45 | https://github.com/minimaxir/simpleaichat/blob/1dead657731e0d2a4df608a7aa94b86322412851/examples/notebooks/schema_ttrpg.ipynb 46 | https://api.python.langchain.com/en/latest/_modules/langchain/tools/convert_to_openai.html#format_tool_to_openai_function 47 | 48 | ## 标准化格式演示 49 | 50 | ```json5 51 | [ 52 | { 53 | 'name': 'get_current_weather', 54 | 'description': 'Get the current weather', 55 | 'parameters': { 56 | 'type': 'object', 57 | 'properties': { 58 | 'location': { 59 | 'description': 'The city and state, e.g. San Francisco, CA', 60 | 'type': 'string' 61 | }, 62 | 'format': { 63 | 'description': 'The temperature unit to use. Infer this from the users location.', 64 | 'type': 'string', 65 | 'enum': [ 66 | 'fahrenheit', 67 | 'celsius' 68 | ] 69 | } 70 | } 71 | }, 72 | 'required': [ 73 | 'location', 74 | 'format' 75 | ] 76 | } 77 | ] 78 | 79 | ``` 80 | 81 | ## 样板消息 82 | 83 | ```python 84 | example_messages = [ 85 | { 86 | "role": "system", 87 | "content": "You are a helpful, pattern-following assistant that translates corporate jargon into plain English.", 88 | }, 89 | { 90 | "role": "system", 91 | "name": "example_user", 92 | "content": "New synergies will help drive top-line growth.", 93 | }, 94 | { 95 | "role": "system", 96 | "name": "example_assistant", 97 | "content": "Things working well together will increase revenue.", 98 | }, 99 | { 100 | "role": "system", 101 | "name": "example_user", 102 | "content": "Let's circle back when we have more bandwidth to touch base on opportunities for increased leverage.", 103 | }, 104 | { 105 | "role": "system", 106 | "name": "example_assistant", 107 | "content": "Let's talk later when we're less busy about how to do better.", 108 | }, 109 | { 110 | "role": "user", 111 | "content": "This late pivot means we don't have time to boil the ocean for the client deliverable.", 112 | }, 113 | ] 114 | ``` 115 | 116 | 117 | ## 备注 118 | 119 | `Open AI does not support an array type` 120 | -------------------------------------------------------------------------------- /docs/note/refer.md: -------------------------------------------------------------------------------- 1 | 讲的不错的文章。 2 | 3 | pydantic 4 | https://qiita.com/koralle/items/93b094ddb6d3af917702 5 | https://github.com/openai/openai-python/blob/main/chatml.md 6 | 7 | interesting project 8 | https://janekb04.github.io/py2gpt/ 9 | https://github.com/janekb04/py2gpt/blob/main/main.py 10 | 11 | How to format inputs to ChatGPT models 12 | https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb 13 | 14 | LLM+Embedding构建问答系统的局限性及优化方案 15 | https://zhuanlan.zhihu.com/p/641132245 16 | 17 | ChatGPT 越过山丘之后,再来谈谈 LLM 应用方向 18 | https://quail.ink/orange/p/chatgpt-cross-over-the-hills-and-discuss-llm-application-directions 19 | 20 | ChatGPT代码解释器非常强大 21 | https://mp.weixin.qq.com/s/nob0sd6NYgbP1vwRyoQvig 22 | 23 | Openai Api Docs 24 | https://platform.openai.com/docs/api-reference/completions 25 | 26 | GOT 27 | https://github.com/spcl/graph-of-thoughts 28 | 29 | AI Agents大爆发:软件2.0雏形初现,OpenAI的下一步 30 | https://mp.weixin.qq.com/s/Jb8HBbaKYXXxTSQOBsP5Wg 31 | 32 | ChatGPT Plugin:被高估的“App Store时刻”,软件和SaaS生态的重组开端 33 | https://mp.weixin.qq.com/s?__biz=Mzg2OTY0MDk0NQ==&mid=2247502626&idx=1&sn=17603eac4304cbe6508866910e2943f3&chksm=ce9b74bcf9ecfdaa3589356f8e2a6351831955d49cf65f22a43ba90559782d20522f629c9448&scene=21#wechat_redirect 34 | 35 | LangChain? 36 | https://twitter.com/AravSrinivas/status/1677884199183994881 37 | https://minimaxir.com/2023/07/langchain-problem/ 38 | 39 | """ 40 | 1 lock 41 | 2 ... 42 | 1 ... 43 | 2 lock 44 | 1 ... 45 | 2 ... 46 | 3 lock 47 | """ 48 | -------------------------------------------------------------------------------- /docs/note/resign.md: -------------------------------------------------------------------------------- 1 | > 此设计已经被完成,并过时。 2 | > 3 | > 4 | 5 | ## 函数注册管理器 6 | 7 | 组件注册关键词:`@register(keywords, **kwargs)`,检测到关键词后,注册函数和对应的message。 8 | 9 | ```python 10 | class Exp: 11 | function 12 | keyword 13 | message 14 | ``` 15 | 16 | 内部构建一个表用于回调。 17 | 18 | 检测到关键词,调用对应的组件函数,传入message,返回message然后回载。 19 | 20 | def register(self, target): 21 | def add_register_item(key, value): 22 | if not callable(value): 23 | raise Exception(f"register object must be callable! But receice:{value} is not callable!") 24 | if key in self._dict: 25 | print(f"warning: \033[33m{value.__name__} has been registered before, so we will overriden it\033[0m") 26 | self[key] = value 27 | return value 28 | 29 | if callable(target): # 如果传入的目标可调用,说明之前没有给出注册名字,我们就以传入的函数或者类的名字作为注册名 30 | return add_register_item(target.__name__, target) 31 | else: # 如果不可调用,说明额外说明了注册的可调用对象的名字 32 | return lambda x : add_register_item(target, x) 33 | 34 | ```python 35 | @resigner.register("search") 36 | class Search(BaseModel): 37 | function: ... 38 | 39 | def func_message(self, message_text): 40 | """ 41 | 如果合格则返回message,否则返回None,表示不处理 42 | """ 43 | if ...: 44 | return self.function 45 | else: 46 | return None 47 | 48 | async def __call__(self, *args, **kwargs): 49 | """ 50 | 处理message,返回message 51 | """ 52 | return ... 53 | ``` 54 | -------------------------------------------------------------------------------- /docs/project_cover.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LlmKira/Openaibot/f86521c8254a7d3143be70fdefb8e48b7a15407d/docs/project_cover.png -------------------------------------------------------------------------------- /docs/schema_exp/openai_response.py: -------------------------------------------------------------------------------- 1 | Data = { 2 | 'id': 'chatcmpl-8J2GoGKFhMHc9ZHqUO', 3 | 'object': 'chat.completion', 4 | 'created': 1691547350, 5 | 'model': 'gpt-3.5-turbo-0613', 6 | 'choices': [ 7 | { 8 | 'index': 0, 9 | 'message': { 10 | 'role': 'assistant', 11 | 'content': None, 12 | 'tool_calls': [ 13 | { 14 | 'id': 'call_l0a6IzA5KJARPjh7BkHEUe', 15 | 'type': 'function', 16 | 'function': { 17 | 'name': 'set_alarm_reminder', 18 | 'arguments': '{\n "delay": 3,\n "content": "叮铃铃!三分钟已经过去了~"\n}' 19 | } 20 | } 21 | ] 22 | }, 23 | 'finish_reason': 'tool_calls' 24 | } 25 | ], 26 | 'usage': { 27 | 'prompt_tokens': 198, 28 | 'completion_tokens': 38, 29 | 'total_tokens': 236, 30 | 'pre_token_count': 4096, 31 | 'pre_total': 42, 32 | 'adjust_total': 40, 33 | 'final_total': 2 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /docs/sticker_func.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LlmKira/Openaibot/f86521c8254a7d3143be70fdefb8e48b7a15407d/docs/sticker_func.gif -------------------------------------------------------------------------------- /docs/test_script/bilibili.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/8/22 下午2:15 3 | # @Author : sudoskys 4 | # @File : _bilibili.py 5 | # @Software: PyCharm 6 | import json 7 | 8 | import inscriptis 9 | from bilibili_api import search, sync, video_zone 10 | 11 | 12 | async def test_f_search_by_order(): 13 | return await search.search_by_type("ウサギの現実は逃げる", search_type=search.SearchObjectType.VIDEO, 14 | order_type=search.OrderVideo.TOTALRANK, time_range=10, page=1) 15 | 16 | 17 | res = sync(test_f_search_by_order()) 18 | print(json.dumps(res, indent=4, ensure_ascii=False)) 19 | 20 | 21 | async def search_on_bilibili(keywords): 22 | _result = await search.search_by_type( 23 | keyword=keywords, 24 | search_type=search.SearchObjectType.VIDEO, 25 | order_type=search.OrderVideo.TOTALRANK, 26 | page=1 27 | ) 28 | _video_list = _result.get("result") 29 | if not _video_list: 30 | return "Search Not Success" 31 | _video_list = _video_list[:3] # 只取前三 32 | _info = [] 33 | for video in _video_list: 34 | _video_title = inscriptis.get_text(video.get("title")) 35 | _video_author = video.get("author") 36 | _video_url = video.get("arcurl") 37 | _video_tag = video.get("tag") 38 | _video_play = video.get("play") 39 | _video_info = f"标题:{_video_title}\n作者:{_video_author}\n链接:{_video_url}\n标签:{_video_tag}\n播放量:{_video_play}" 40 | _info.append(_video_info) 41 | return "\n\n".join(_info) 42 | -------------------------------------------------------------------------------- /docs/test_script/database/note_rabbit_connect.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/8/17 下午10:09 3 | # @Author : sudoskys 4 | # @File : ra.py 5 | # @Software: PyCharm 6 | import threading 7 | 8 | import pika 9 | 10 | 11 | class SingletonClass(object): 12 | """单例模式用来少创建连接""" 13 | # 加锁,防止并发较高时,同时创建对象,导致创建多个对象 14 | _singleton_lock = threading.Lock() 15 | 16 | def __init__(self, username='baibing', password='123456', ip='47.xxx.xxx.xx', port=5672, data={}): 17 | """__init__在new出来对象后实例化对象""" 18 | self.credentials = pika.PlainCredentials(username, password) 19 | self.connection = pika.BlockingConnection( 20 | pika.ConnectionParameters(host=ip, port=port, credentials=self.credentials)) 21 | self.channel = self.connection.channel() 22 | print('连接成功') 23 | 24 | def __new__(cls): 25 | """__new__用来创建对象""" 26 | if not hasattr(SingletonClass, "_instance"): 27 | with SingletonClass._singleton_lock: 28 | if not hasattr(SingletonClass, "_instance"): 29 | SingletonClass._instance = super().__new__(cls) 30 | return SingletonClass._instance 31 | 32 | def callback(self, ch, method, properties, body): 33 | """订阅者的回调函数,可以在这里面做操作,比如释放库存等""" 34 | ch.basic_ack(delivery_tag=method.delivery_tag) # ack机制, 35 | print("邮箱", body.decode()) 36 | 37 | def connection_close(self): 38 | """关闭连接""" 39 | self.connection.close() 40 | 41 | def consuming_start(self): 42 | """等待消息""" 43 | self.channel.start_consuming() 44 | 45 | def this_publisher(self, email, queue_name='HELLOP'): 46 | """发布者 47 | email:消息 48 | queue_name:队列名称 49 | """ 50 | 51 | # 1、创建一个名为python-test的交换机 durable=True 代表exchange持久化存储 52 | self.channel.exchange_declare(exchange='python', durable=True, exchange_type='direct') 53 | # self.channel.queue_declare(queue=queue_name) 54 | # 2、订阅发布模式,向名为python-test的交换机中插入用户邮箱地址email,delivery_mode = 2 声明消息在队列中持久化,delivery_mod = 1 消息非持久化 55 | self.channel.basic_publish(exchange='python', 56 | routing_key='OrderId', 57 | body=email, 58 | properties=pika.BasicProperties(delivery_mode=2) 59 | ) 60 | 61 | print("队列{}发送用户邮箱{}到MQ成功".format(queue_name, email)) 62 | # 3. 关闭连接 63 | self.connection_close() 64 | 65 | def this_subscriber(self, queue_name='HELLOP', prefetch_count=10): 66 | """订阅者 67 | queue_name:队列名称 68 | prefetch_count:限制未处理消息的最大值,ack未开启时生效 69 | """ 70 | # 创建临时队列,队列名传空字符,consumer关闭后,队列自动删除 71 | result = self.channel.queue_declare('', durable=True, exclusive=True) 72 | 73 | # 声明exchange,由exchange指定消息在哪个队列传递,如不存在,则创建。durable = True 代表exchange持久化存储,False 非持久化存储 74 | self.channel.exchange_declare(exchange='python', durable=True, exchange_type='direct') 75 | 76 | # 绑定exchange和队列 exchange 使我们能够确切地指定消息应该到哪个队列去 77 | self.channel.queue_bind(exchange='python', queue=result.method.queue, routing_key='OrderId') 78 | 79 | self.channel.basic_consume( 80 | result.method.queue, 81 | self.callback, # 回调地址(函数) 82 | auto_ack=False # 设置成 False,在调用callback函数时,未收到确认标识,消息会重回队列。True,无论调用callback成功与否,消息都被消费掉 83 | ) 84 | # 等待消息 85 | self.consuming_start() 86 | -------------------------------------------------------------------------------- /docs/test_script/database/note_rabbit_pika_usage.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/8/17 下午7:28 3 | # @Author : sudoskys 4 | # @File : rabbit.py 5 | # @Software: PyCharm 6 | # !/usr/bin/env python 7 | import atexit 8 | import time 9 | 10 | from pydantic import BaseModel 11 | 12 | 13 | class Test(BaseModel): 14 | name: str 15 | age: int 16 | 17 | 18 | import pika 19 | 20 | # 创建凭证,使用rabbitmq用户密码登录 21 | # 去邮局取邮件,必须得验证身份 22 | credentials = pika.PlainCredentials("admin", "admin") 23 | # 新建连接,这里localhost可以更换为服务器ip 24 | # 找到这个邮局,等于连接上服务器 25 | connection = pika.BlockingConnection(pika.ConnectionParameters('127.0.0.1', credentials=credentials)) 26 | 27 | # 创建频道 28 | # 建造一个大邮箱,隶属于这家邮局的邮箱,就是个连接 29 | channel = connection.channel() 30 | # 声明一个队列,用于接收消息,队列名字叫“水许传” 31 | channel.queue_declare(queue='水许传') 32 | # 注意在rabbitmq中,消息想要发送给队列,必须经过交换(exchange),初学可以使用空字符串交换(exchange=''),它允许我们精确的指定发送给哪个队列(routing_key=''),参数body值发送的数据 33 | channel.basic_publish(exchange='', 34 | routing_key='水许传', 35 | body=Test(name="sudoskys", age=19).json()) 36 | print("已经发送了消息") 37 | # 程序退出前,确保刷新网络缓冲以及消息发送给rabbitmq,需要关闭本次连接 38 | time.sleep(100) 39 | 40 | 41 | @atexit.register 42 | def __clean(): 43 | connection.close() 44 | -------------------------------------------------------------------------------- /docs/test_script/database/note_rabbitmq_receiver.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/8/17 下午9:05 3 | # @Author : sudoskys 4 | # @File : rabbit_rev.py 5 | # @Software: PyCharm 6 | import pika 7 | from pydantic import BaseModel 8 | 9 | # 建立与rabbitmq的连接 10 | credentials = pika.PlainCredentials("admin", "admin") 11 | connection = pika.BlockingConnection(pika.ConnectionParameters('127.0.0.1', credentials=credentials)) 12 | channel = connection.channel() 13 | channel.queue_declare(queue="水许传") 14 | 15 | 16 | def callbak(ch, method, properties, body): 17 | class Test(BaseModel): 18 | name: str 19 | age: int 20 | print(Test.parse_raw(body)) 21 | print("消费者接收到了任务:%r" % body.decode("utf8")) 22 | # 手动确认消息已经被消费 23 | ch.basic_ack(delivery_tag=method.delivery_tag) 24 | 25 | 26 | # 有消息来临,立即执行callbak,没有消息则夯住,等待消息 27 | # 老百姓开始去邮箱取邮件啦,队列名字是水许传 28 | channel.basic_consume(on_message_callback=callbak, queue="水许传", auto_ack=False) 29 | # 开始消费,接收消息 30 | channel.start_consuming() 31 | -------------------------------------------------------------------------------- /docs/test_script/database/note_redis_lpush_usage.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/10/11 下午3:48 3 | # @Author : sudoskys 4 | # @File : redis_lpush.py 5 | # @Software: PyCharm 6 | import time 7 | 8 | from llmkira.sdk.cache import RedisRuntime 9 | 10 | 11 | async def redis(): 12 | cache = RedisRuntime().get_redis() 13 | await cache.lpush_data("test", int(time.time())) 14 | await cache.lpush_data("test", int(time.time()) + 1) 15 | _data = await cache.lrange_data("test") 16 | print(f"now data is {_data}") 17 | _data = await cache.lpop_data("test") 18 | print(f"pop data is {_data}") 19 | _data = await cache.lrange_data("test") 20 | print(f"now data is {_data}") 21 | _data = await cache.lrange_data("test", start_end=(0, 1)) 22 | print(f"(0,1) data is {_data}") 23 | 24 | 25 | import asyncio 26 | 27 | asyncio.run(redis()) 28 | -------------------------------------------------------------------------------- /docs/test_script/discord.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/10/16 上午11:46 3 | # @Author : sudoskys 4 | # @File : discord.py 5 | # @Software: PyCharm 6 | # This example requires the 'message_content' intent. 7 | 8 | import nextcord 9 | from nextcord.ext import commands 10 | 11 | TESTING_GUILD_ID = 123456789 # Replace with your guild ID 12 | 13 | bot = commands.Bot() 14 | 15 | 16 | @bot.command() 17 | async def hello(ctx): 18 | await ctx.reply("Hello!") 19 | 20 | 21 | @bot.event 22 | async def on_ready(): 23 | print(f'We have logged in as {bot.user}') 24 | 25 | 26 | @bot.slash_command(description="My first slash command", guild_ids=[TESTING_GUILD_ID]) 27 | async def hello(interaction: nextcord.Interaction): 28 | await interaction.send("Hello!") 29 | 30 | 31 | bot.run('your token here') 32 | -------------------------------------------------------------------------------- /docs/test_script/duckduck.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/8/24 下午11:36 3 | # @Author : sudoskys 4 | # @File : duckduck.py 5 | # @Software: PyCharm 6 | from duckduckgo_search import DDGS 7 | 8 | from llmkira.sdk import Sublimate 9 | 10 | search = "评价一下刀郎的罗刹海市?" 11 | key = ["刀郎"] 12 | 13 | with DDGS(timeout=20) as ddgs: 14 | _text = [] 15 | for r in ddgs.text(search): 16 | _title = r.get("title") 17 | _href = r.get("href") 18 | _body = r.get("body") 19 | _text.append(_body) 20 | print(_text) 21 | _test_result = Sublimate(_text).valuation(match_sentence=search, match_keywords=key) 22 | for key in _test_result: 23 | print(key) 24 | -------------------------------------------------------------------------------- /docs/test_script/fake_plugin/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/10/12 下午11:45 3 | # @Author : sudoskys 4 | # @File : __init__.py.py 5 | # @Software: PyCharm 6 | __plugin_meta__ = "test" 7 | print(__plugin_meta__) 8 | -------------------------------------------------------------------------------- /docs/test_script/func_call.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/8/16 下午6:56 3 | # @Author : sudoskys 4 | # @File : func_call.py 5 | # @Software: PyCharm 6 | 7 | 8 | from llmkira.sdk import Function 9 | 10 | from llmkira.sdk.func_calling.register import listener 11 | from llmkira.sdk import BaseTool 12 | 13 | search = Function(name="get_current_weather", description="Get the current weather") 14 | search.add_property( 15 | property_name="location", 16 | property_description="The city and state, e.g. San Francisco, CA", 17 | property_type="string", 18 | required=True 19 | ) 20 | 21 | 22 | @listener(function=search) 23 | class SearchTool(BaseTool): 24 | """ 25 | 搜索工具 26 | """ 27 | function: Function = search 28 | 29 | def func_message(self, message_text): 30 | """ 31 | 如果合格则返回message,否则返回None,表示不处理 32 | """ 33 | if "搜索" in message_text: 34 | return self.function 35 | else: 36 | return None 37 | 38 | async def __call__(self, *args, **kwargs): 39 | """ 40 | 处理message,返回message 41 | """ 42 | return "搜索成功" 43 | -------------------------------------------------------------------------------- /docs/test_script/funtion.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/8/16 上午12:26 3 | # @Author : sudoskys 4 | # @File : test_funtion.py 5 | # @Software: PyCharm 6 | import json 7 | from typing import Optional, Literal, List 8 | 9 | from pydantic import BaseModel 10 | 11 | 12 | class Function(BaseModel): 13 | class Parameters(BaseModel): 14 | type: str = "object" 15 | properties: dict = {} 16 | 17 | class Meta(BaseModel): 18 | tips: str = "This is a function" 19 | 20 | _meta: Meta = Meta() 21 | name: str 22 | description: Optional[str] = None 23 | parameters: Parameters = Parameters(type="object") 24 | required: List[str] 25 | 26 | def add_property(self, property_name: str, 27 | property_type: Literal["string", "integer", "number", "boolean", "object"], 28 | property_description: str, 29 | enum: Optional[tuple] = None, 30 | required: bool = False 31 | ): 32 | self.parameters.properties[property_name] = {} 33 | self.parameters.properties[property_name]['type'] = property_type 34 | self.parameters.properties[property_name]['description'] = property_description 35 | if enum: 36 | self.parameters.properties[property_name]['enum'] = tuple(enum) 37 | if required: 38 | self.required.append(property_name) 39 | 40 | 41 | if __name__ == '__main__': 42 | f = Function(name="call", description="make a call", required=[]) 43 | f.add_property( 44 | property_name="user_name", 45 | property_type="string", 46 | property_description="user name for calling", 47 | enum=("Li", "Susi"), 48 | required=True 49 | ) 50 | # print(Function.schema_json(indent=4)) 51 | print(json.dumps(f.model_dump(), indent=4)) 52 | -------------------------------------------------------------------------------- /docs/test_script/inpoint.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/9/18 下午9:35 3 | # @Author : sudoskys 4 | # @File : inpoint.py 5 | # @Software: PyCharm 6 | from arclet.alconna import Alconna, Option, Subcommand, Args 7 | 8 | cmd = Alconna( 9 | "/pip", 10 | Subcommand("install", Option("-u|--upgrade"), Args.pak_name[str]), 11 | Option("list") 12 | ) 13 | 14 | result = cmd.parse("/pip install ss numpy2 --upgrade") # 该方法返回一个Arpamar类的实例 15 | print(result.query('install')) 16 | -------------------------------------------------------------------------------- /docs/test_script/note_emoji_regex.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/10/22 下午11:53 3 | # @Author : sudoskys 4 | # @File : emoji_.py 5 | # @Software: PyCharm 6 | 7 | import pathlib 8 | from typing import Literal 9 | 10 | import emoji 11 | 12 | 13 | def get_sticker_table(sticker_dir: pathlib.Path) -> dict: 14 | if not sticker_dir.exists() or not sticker_dir.is_dir(): 15 | raise Exception('sticker dir not exists') 16 | sticker_list = list(sticker_dir.glob('*.png')) 17 | _emoji = {} 18 | for sticker in sticker_list: 19 | if len(emoji.emojize(sticker.stem)) == 1: 20 | _emoji[emoji.emojize(sticker.stem)] = sticker.absolute() 21 | return _emoji 22 | 23 | 24 | table = get_sticker_table(pathlib.Path('sticker')) 25 | _emoji_list = ",".join(table.keys()) 26 | print(f"Literal[{_emoji_list}]") 27 | 28 | import re 29 | 30 | emoji_pattern = re.compile("[" 31 | u"\U0001F600-\U0001F64F" # emoticons 32 | u"\U0001F300-\U0001F5FF" # symbols & pictographs 33 | u"\U0001F680-\U0001F6FF" # transport & map symbols 34 | u"\U0001F1E0-\U0001F1FF" # flags (iOS) 35 | "]+", flags=re.UNICODE) 36 | print(emoji_pattern.findall("👍 sdasda")) 37 | 38 | print(emoji.emojize("👍")) 39 | 40 | MODEL = Literal["👍", "👍🏻", "👍🏼", "👍🏽", "👍🏾", "👍🏿"] 41 | print(MODEL.__args__[2]) 42 | -------------------------------------------------------------------------------- /docs/test_script/note_entry_point.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/10/12 下午9:48 3 | # @Author : sudoskys 4 | # @File : entry_point.py 5 | # @Software: PyCharm 6 | 7 | import importlib_metadata 8 | 9 | _result = importlib_metadata.entry_points().select(group="llmkira.extra.plugin") 10 | print(type(_result)) 11 | for item in _result: 12 | print(item.module) 13 | rs = item.load() 14 | print(rs) 15 | 16 | print(_result) 17 | # importlib.import_module(_result.module) 18 | 19 | -------------------------------------------------------------------------------- /docs/test_script/note_github_bot.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/11/15 下午9:49 3 | # @Author : sudoskys 4 | # @File : note_github_bot.py 5 | # @Software: PyCharm 6 | import os 7 | 8 | import requests 9 | from dotenv import load_dotenv 10 | from flask import Flask, request 11 | from github import Github, GithubIntegration 12 | 13 | load_dotenv() 14 | 15 | __flask_app_name__ = 'github_bot' 16 | app = Flask(__flask_app_name__) 17 | app_id = int(os.environ['GITHUB_APP_ID']) 18 | with open( 19 | os.path.normpath(os.path.expanduser(os.getenv("GITHUB_PRIVATE_KEY_FILE", '~/.certs/github/bot_key.pem'))), 20 | 'r' 21 | ) as cert_file: 22 | app_key = cert_file.read() 23 | 24 | git_integration = GithubIntegration( 25 | app_id, 26 | app_key, 27 | ) 28 | 29 | 30 | @app.route("/", methods=['POST']) 31 | def bot(): 32 | # Get the event payload 33 | payload = request.json 34 | # Check if the event is a GitHub PR creation event 35 | if not all(k in payload.keys() for k in ['action', 'pull_request']) and \ 36 | payload['action'] == 'opened': 37 | return "ok" 38 | owner = payload['repository']['owner']['login'] 39 | repo_name = payload['repository']['name'] 40 | # Get a git connection as our bot 41 | # Here is where we are getting the permission to talk as our bot and not 42 | # as a Python webservice 43 | git_connection = Github( 44 | login_or_token=git_integration.get_access_token( 45 | git_integration.get_installation(owner, repo_name).id 46 | ).token 47 | ) 48 | repo = git_connection.get_repo(f"{owner}/{repo_name}") 49 | 50 | issue = repo.get_issue(number=payload['pull_request']['number']) 51 | 52 | # Call meme-api to get a random meme 53 | response = requests.get(url='https://meme-api.herokuapp.com/gimme') 54 | if response.status_code != 200: 55 | return 'ok' 56 | 57 | # Get the best resolution meme 58 | meme_url = response.json()['preview'][-1] 59 | # Create a comment with the random meme 60 | issue.create_comment(f"![Alt Text]({meme_url})") 61 | return "ok" 62 | 63 | 64 | if __name__ == "__main__": 65 | app.run(debug=True, port=5000) 66 | -------------------------------------------------------------------------------- /docs/test_script/note_github_bot_test.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/11/15 下午10:19 3 | # @Author : sudoskys 4 | # @File : note_github_bot_test.py 5 | # @Software: PyCharm 6 | import os 7 | 8 | from dotenv import load_dotenv 9 | from github_bot_api import Event, Webhook 10 | from github_bot_api import GithubApp 11 | from github_bot_api.flask import create_flask_app 12 | 13 | load_dotenv() 14 | 15 | app_id = int(os.environ['GITHUB_APP_ID']) 16 | with open( 17 | os.path.normpath(os.path.expanduser(os.getenv("GITHUB_PRIVATE_KEY_FILE", '~/.certs/github/bot_key.pem'))), 18 | 'r' 19 | ) as cert_file: 20 | app_key = cert_file.read() 21 | 22 | app = GithubApp( 23 | user_agent='my-bot/0.0.0', 24 | app_id=app_id, 25 | private_key=app_key 26 | ) 27 | 28 | webhook = Webhook(secret=None) 29 | 30 | 31 | @webhook.listen('issues') 32 | def on_pull_request(event: Event) -> bool: 33 | print(event.payload) 34 | client = app.installation_client(event.payload['installation']['id']) 35 | repo = client.get_repo(event.payload['repository']['full_name']) 36 | issue = repo.get_issue(number=event.payload['issue']['number']) 37 | issue.create_comment('Hello World') 38 | return True 39 | 40 | 41 | @webhook.listen('issue_comment') 42 | def on_issue_comment(event: Event) -> bool: 43 | print(event.payload) 44 | client = app.installation_client(event.payload['installation']['id']) 45 | repo = client.get_repo(event.payload['repository']['full_name']) 46 | issue = repo.get_issue(number=event.payload['issue']['number']) 47 | issue.edit( 48 | body=f"Hello World\n\n{issue.body}" 49 | 50 | ) 51 | return True 52 | 53 | 54 | import os 55 | 56 | os.environ['FLASK_ENV'] = 'development' 57 | flask_app = create_flask_app(__name__, webhook) 58 | flask_app.run() 59 | -------------------------------------------------------------------------------- /docs/test_script/note_kook_usage.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/10/20 下午2:02 3 | # @Author : sudoskys 4 | # @File : khl_2.py 5 | # @Software: PyCharm 6 | from khl import Bot, Message, api, HTTPRequester, Cert, MessageTypes 7 | 8 | bot_token = "xxxx" 9 | # init Bot 10 | bot = Bot(token=bot_token) 11 | # bot.client.create_asset(file="") 12 | 13 | 14 | # register command, send `/hello` in channel to invoke 15 | @bot.command(name='wss') 16 | async def world(msg: Message): 17 | await msg.reply('world!') 18 | print(msg.author_id) 19 | # return 20 | print(msg.ctx.channel.id) 21 | print(await msg.ctx.channel.send("h")) 22 | await HTTPRequester(cert=Cert(token=bot_token)).exec_req(api.Message.create( 23 | target_id=msg.ctx.channel.id, 24 | type=MessageTypes.KMD.value, 25 | content='hello!', 26 | # temp_target_id=msg.author.id, 27 | ) 28 | ) 29 | 30 | 31 | print('hello world') 32 | # everything done, go ahead now! 33 | 34 | import requests 35 | 36 | 37 | class KookHttpClient(object): 38 | def __init__(self, token): 39 | self.base_url = 'https://www.kookapp.cn' 40 | self.bot_token = token 41 | 42 | def request(self, method, url, data=None): 43 | headers = { 44 | 'Authorization': f'Bot {self.bot_token}' 45 | } 46 | response = requests.request(method, f'{self.base_url}{url}', headers=headers, json=data) 47 | return response.json() 48 | 49 | def create_channel_message(self, target_id, content, quote=None): 50 | data = { 51 | 'type': 1, 52 | 'target_id': target_id, 53 | 'content': content, 54 | 'quote': quote 55 | } 56 | return self.request('POST', '/api/v3/message/create', data) 57 | 58 | def create_direct_message(self, target_id, content, quote=None): 59 | data = { 60 | 'type': 1, 61 | 'target_id': target_id, 62 | 'content': content, 63 | 'quote': quote 64 | } 65 | return self.request('POST', '/api/v3/direct-message/create', data) 66 | 67 | 68 | # 示例用法 69 | sdk = KookHttpClient(bot_token) 70 | # sdk.create_channel_message('channel_id', 'Hello, KOOK!') 71 | _res = sdk.create_direct_message('1564611098', 'Hey there!') 72 | print(_res) 73 | -------------------------------------------------------------------------------- /docs/test_script/note_match_re.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/11/11 下午11:09 3 | # @Author : sudoskys 4 | # @File : match.py 5 | # @Software: PyCharm 6 | import re 7 | from pprint import pprint 8 | 9 | if __name__ == '__main__': 10 | result = re.compile(r"(.+).jpg|(.+).png|(.+).jpeg|(.+).gif|(.+).webp|(.+).svg").match("1.webp") 11 | pprint(result) 12 | -------------------------------------------------------------------------------- /docs/test_script/note_openai_req.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/8/16 下午1:43 3 | # @Author : sudoskys 4 | # @File : openai.py 5 | # @Software: PyCharm 6 | 7 | from llmkira.sdk.endpoint import openai 8 | from llmkira.sdk.endpoint.tee import Driver 9 | 10 | example_messages = [ 11 | { 12 | "role": "system", 13 | "content": "You are a helpful, pattern-following assistant that translates corporate jargon into plain English.", 14 | }, 15 | { 16 | "role": "system", 17 | "name": "example_user", 18 | "content": "New synergies will help drive top-line growth.", 19 | }, 20 | { 21 | "role": "system", 22 | "name": "example_assistant", 23 | "content": "Things working well together will increase revenue.", 24 | }, 25 | { 26 | "role": "system", 27 | "name": "example_user", 28 | "content": "Let's circle back when we have more bandwidth to touch base on opportunities for increased leverage.", 29 | }, 30 | { 31 | "role": "system", 32 | "name": "example_assistant", 33 | "content": "Let's talk later when we're less busy about how to do better.", 34 | }, 35 | { 36 | "role": "user", 37 | "content": "This late pivot means we don't have time to boil the ocean for the client deliverable.", 38 | }, 39 | ] 40 | if __name__ == '__main__': 41 | from loguru import logger 42 | import asyncio 43 | 44 | _list = [ 45 | { 46 | "role": "system", 47 | "content": "You are a helpful, pattern-following assistant.", 48 | }, 49 | { 50 | "role": "system", 51 | "name": "example_user", 52 | "content": "New synergies will help drive top-line growth.", 53 | }, 54 | { 55 | "role": "user", 56 | "name": "Miku_Chan", 57 | # "content": "你知道悉尼是哪个国家的首都吗?", 58 | "content": "帮我看一下悉尼的天气", 59 | }, 60 | ] 61 | message = [] 62 | for i in _list: 63 | message.append(sdk.schema.Message(**i)) 64 | logger.debug(message) 65 | driver = Driver() 66 | logger.debug(driver) 67 | search = sdk.schema.Function(name="get_current_weather", description="Get the current weather") 68 | search.add_property( 69 | property_name="location", 70 | property_description="The city and state, e.g. San Francisco, CA", 71 | property_type="string", 72 | required=True 73 | ) 74 | endpoint = openai.Openai(config=driver, messages=message, functions=[search]) 75 | 76 | 77 | async def main(): 78 | _result = await endpoint.create() 79 | logger.debug(_result) 80 | 81 | 82 | try: 83 | loop = asyncio.get_event_loop() 84 | loop.run_until_complete(main()) 85 | except Exception as e: 86 | logger.exception(e) 87 | -------------------------------------------------------------------------------- /docs/test_script/note_pydantic_alias_usage.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/10/17 上午10:18 3 | # @Author : sudoskys 4 | # @File : pydantic.py 5 | # @Software: PyCharm 6 | 7 | from pydantic import BaseModel, Field 8 | 9 | 10 | class Test(BaseModel): 11 | __slots__ = () 12 | test: int = Field(0, alias="test") 13 | env_required: list = Field([], alias="env_required") 14 | env_help_docs: str = Field("") 15 | 16 | 17 | class Child(Test): 18 | rs = 1 19 | env_required = ["test"] 20 | 21 | 22 | _r = Child().model_dump() 23 | _s = Child().env_help_docs 24 | print(_r) 25 | -------------------------------------------------------------------------------- /docs/test_script/note_pydantic_class_or_subclass.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/9/21 下午11:34 3 | # @Author : sudoskys 4 | # @File : type_tes.py 5 | # @Software: PyCharm 6 | from pydantic import BaseModel 7 | 8 | 9 | class Base(BaseModel): 10 | at = 0 11 | 12 | 13 | class Base2(Base): 14 | ut = 1 15 | 16 | @property 17 | def att(self): 18 | return 1 19 | 20 | 21 | def test(): 22 | return 1 23 | 24 | 25 | if __name__ == '__main__': 26 | print(isinstance(Base2(), Base)) 27 | print(isinstance(Base(), Base2)) 28 | print(hasattr(Base(), 'att')) 29 | print([Base2()]) 30 | assert isinstance(Base2(), Base) 31 | -------------------------------------------------------------------------------- /docs/test_script/note_rss_parser.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/8/22 上午12:19 3 | # @Author : sudoskys 4 | # @File : rss.py 5 | # @Software: PyCharm 6 | import json 7 | import socket 8 | 9 | import feedparser 10 | import html2text 11 | 12 | socket.setdefaulttimeout(5) 13 | feed_url = "https://ww"#"https://www.gcores.com/rss" # "https://www.zhihu.com/rss" 14 | res = feedparser.parse(feed_url) 15 | print(res) 16 | print(json.dumps(res, indent=4, ensure_ascii=False)) 17 | entries = res["entries"] 18 | source = res["feed"]["title"] 19 | _info = [ 20 | { 21 | "title": entry["title"], 22 | "url": entry["link"], 23 | "id": entry["id"], 24 | "author": entry["author"], 25 | "summary": html2text.html2text(entry["summary"]), 26 | } 27 | for entry in entries 28 | ] 29 | print(_info) 30 | -------------------------------------------------------------------------------- /docs/test_script/pydantic_debug.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/11/13 下午6:27 3 | # @Author : sudoskys 4 | # @File : pydantic_debug.py 5 | # @Software: PyCharm 6 | from typing import Union, Optional, List, Tuple, Dict, Any 7 | 8 | from pydantic import model_validator, Field, BaseModel, ConfigDict 9 | 10 | from llmkira.sdk.schema import ToolMessage, FunctionMessage, TaskBatch, Function 11 | 12 | 13 | class Meta(BaseModel): 14 | class Callback(BaseModel): 15 | function_response: str = Field("empty response", description="工具响应内容") 16 | name: str = Field(None, description="功能名称", pattern=r"^[a-zA-Z0-9_]+$") 17 | tool_call_id: Optional[str] = Field(None, description="工具调用ID") 18 | 19 | @model_validator(mode="after") 20 | def check(self): 21 | """ 22 | 检查回写消息 23 | """ 24 | if not self.tool_call_id and not self.name: 25 | raise ValueError("tool_call_id or name must be set") 26 | return self 27 | 28 | @classmethod 29 | def create(cls, 30 | *, 31 | function_response: str, 32 | name: str, 33 | tool_call_id: Union[str, None] = None 34 | ): 35 | return cls( 36 | function_response=function_response, 37 | name=name, 38 | tool_call_id=tool_call_id, 39 | ) 40 | 41 | def get_tool_message(self) -> Union[ToolMessage]: 42 | if self.tool_call_id: 43 | return ToolMessage( 44 | tool_call_id=self.tool_call_id, 45 | content=self.function_response 46 | ) 47 | raise ValueError("tool_call_id is empty") 48 | 49 | def get_function_message(self) -> Union[FunctionMessage]: 50 | if self.name: 51 | return FunctionMessage( 52 | name=self.name, 53 | content=self.function_response 54 | ) 55 | raise ValueError("name is empty") 56 | 57 | """当前链条的层级""" 58 | sign_as: Tuple[int, str, str] = Field((0, "root", "default"), description="签名") 59 | 60 | """函数并行的信息""" 61 | plan_chain_archive: List[Tuple[TaskBatch, Union[Exception, dict, str]]] = Field( 62 | default=[], 63 | description="完成的节点" 64 | ) 65 | plan_chain_pending: List[TaskBatch] = Field(default=[], description="待完成的节点") 66 | plan_chain_length: int = Field(default=0, description="节点长度") 67 | plan_chain_complete: Optional[bool] = Field(False, description="是否完成此集群") 68 | 69 | """功能状态与缓存""" 70 | function_enable: bool = Field(False, description="功能开关") 71 | function_list: List[Function] = Field([], description="功能列表") 72 | function_salvation_list: List[Function] = Field([], description="上回合的功能列表,用于容错") 73 | 74 | """携带插件的写回结果""" 75 | write_back: bool = Field(False, description="写回消息") 76 | callback: List[Callback] = Field( 77 | default=[], 78 | description="用于回写,插件返回的消息头,标识 function 的名字" 79 | ) 80 | 81 | """接收器的路由规则""" 82 | callback_forward: bool = Field(False, description="转发消息") 83 | callback_forward_reprocess: bool = Field(False, description="转发消息,但是要求再次处理") 84 | direct_reply: bool = Field(False, description="直接回复,跳过函数处理等") 85 | 86 | release_chain: bool = Field(False, description="是否响应队列中的函数集群拉起请求") 87 | """部署点的生长规则""" 88 | resign_next_step: bool = Field(True, description="函数集群是否可以继续拉起其他函数集群") 89 | run_step_already: int = Field(0, description="函数集群计数器") 90 | run_step_limit: int = Field(4, description="函数集群计数器上限") 91 | 92 | """函数中枢的依赖变量""" 93 | verify_uuid: Optional[str] = Field(None, description="认证链的UUID,根据此UUID和 Map 可以确定哪个需要执行") 94 | verify_map: Dict[str, TaskBatch] = Field({}, 95 | description="函数节点的认证信息,经携带认证重发后可通过") 96 | llm_result: Any = Field(None, description="存储任务的衍生信息源") 97 | llm_type: Optional[str] = Field(None, description="存储任务的衍生信息源类型") 98 | extra_args: dict = Field({}, description="提供额外参数") 99 | 100 | model_config = ConfigDict(arbitrary_types_allowed=True) 101 | 102 | def chain(self, 103 | name, 104 | write_back: bool, 105 | release_chain: bool 106 | ) -> "Meta": 107 | """ 108 | 生成副本,重置链条 109 | """ 110 | self.sign_as = (self.sign_as[0] + 1, "chain", name) 111 | self.run_step_already += 1 112 | self.callback_forward = False 113 | self.callback_forward_reprocess = False 114 | self.direct_reply = False 115 | self.write_back = write_back 116 | self.release_chain = release_chain 117 | return self.model_copy(deep=True) 118 | 119 | 120 | someUnexpectedType = ... 121 | Meta( 122 | sign_as=(0, "root", "test"), 123 | release_chain=True, 124 | function_enable=True, 125 | llm_result=someUnexpectedType, 126 | ).chain("test", True, True) 127 | -------------------------------------------------------------------------------- /docs/test_script/pydantic_feat.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/11/12 下午9:12 3 | # @Author : sudoskys 4 | # @File : pydantic_feat.py 5 | # @Software: PyCharm 6 | from pydantic import field_validator, BaseModel, ValidationError 7 | 8 | 9 | class UserModel(BaseModel): 10 | id: int 11 | name: str 12 | 13 | @field_validator('name') 14 | def name_must_contain_space(cls, v: str) -> str: 15 | if ' ' not in v: 16 | raise ValueError('must contain a space') 17 | return v.title() 18 | 19 | 20 | try: 21 | UserModel(id=0, name='JohnDoe') 22 | except ValidationError as e: 23 | print(e) 24 | -------------------------------------------------------------------------------- /docs/test_script/pydantic_function.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/11/13 上午2:00 3 | # @Author : sudoskys 4 | # @File : pydantic_function.py 5 | # @Software: PyCharm 6 | from pprint import pprint 7 | 8 | from llmkira.sdk.schema import Function 9 | from pydantic import BaseModel, ConfigDict, field_validator, Field 10 | 11 | 12 | class Alarm(BaseModel): 13 | """ 14 | Set a timed reminder (only for minutes) 15 | """ 16 | delay: int = Field(5, description="The delay time, in minutes") 17 | content: str = Field(..., description="reminder content") 18 | model_config = ConfigDict(extra="allow") 19 | 20 | @field_validator("delay") 21 | def delay_validator(cls, v): 22 | if v < 0: 23 | raise ValueError("delay must be greater than 0") 24 | return v 25 | 26 | 27 | result = Function.parse_from_pydantic(schema_model=Alarm, plugin_name="set_alarm_reminder") 28 | 29 | pprint(result) 30 | -------------------------------------------------------------------------------- /docs/test_script/pydantic_mo.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/11/11 下午3:42 3 | # @Author : sudoskys 4 | # @File : pydantic_mo.py 5 | # @Software: PyCharm 6 | from typing import Optional 7 | 8 | from pydantic import BaseModel 9 | 10 | 11 | class User(BaseModel): 12 | id: int 13 | name: str 14 | age: Optional[int] = None 15 | 16 | 17 | if __name__ == '__main__': 18 | u1 = User(id=1, name="sudoskys") 19 | u11 = User(id=1, name="sudoskys") 20 | u2 = User(id=2, name="sudoskys", age=18) 21 | u3 = User(id=3, name="sudoskys", age=18) 22 | print(u1 == u11) 23 | l1 = [u1, u2] 24 | print(u1 in l1) 25 | l1.remove(u11) 26 | print(l1) 27 | -------------------------------------------------------------------------------- /docs/test_script/survey_arg_parse.py: -------------------------------------------------------------------------------- 1 | import json 2 | import re 3 | 4 | 5 | def parse_env(env_string): 6 | if not env_string.endswith(";"): 7 | env_string = env_string + ";" 8 | pattern = r'(\w+)\s*=\s*(.*?)\s*;' 9 | matches = re.findall(pattern, env_string, re.MULTILINE) 10 | _env_table = {} 11 | for match in matches: 12 | _key = match[0] 13 | _value = match[1] 14 | _value = _value.strip().strip("\"") 15 | _key = _key.upper() 16 | _env_table[_key] = _value 17 | print(_env_table) 18 | return _env_table 19 | 20 | 21 | env = """USER_NAME="admin" ;asdfasd=123;adsad=;""" 22 | 23 | parse_env(env) 24 | -------------------------------------------------------------------------------- /docs/test_script/survey_arg_parser.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/8/16 下午1:46 3 | # @Author : sudoskys 4 | # @File : arg.py.py 5 | # @Software: PyCharm 6 | from typing import Optional 7 | 8 | from pydantic import BaseModel, Field 9 | 10 | 11 | class Search(BaseModel): 12 | """ 13 | 测试搜索类型 14 | """ 15 | keywords: Optional[str] = Field(None, description="关键词") 16 | text: Optional[str] = Field(None, description="文本") 17 | 18 | def run(self): 19 | return self.keywords + self.text 20 | 21 | 22 | print(Search.model_json_schema()) 23 | -------------------------------------------------------------------------------- /docs/test_script/survey_chatglm_tokenzier.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/11/5 下午9:56 3 | # @Author : sudoskys 4 | # @File : tokenzier.py 5 | # @Software: PyCharm 6 | 7 | len([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) 8 | 9 | from transformers import AutoTokenizer 10 | 11 | tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm3-6b", trust_remote_code=True) 12 | some = tokenizer("你好", return_tensors="pt") 13 | print(some) 14 | 15 | -------------------------------------------------------------------------------- /docs/test_script/survey_khl_lib.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/10/20 下午1:26 3 | # @Author : sudoskys 4 | # @File : khl.py 5 | # @Software: PyCharm 6 | import asyncio 7 | 8 | from khl import Bot, api, HTTPRequester, Cert, MessageTypes 9 | 10 | # import nest_asyncio 11 | # nest_asyncio.apply() 12 | 13 | bot_token = "xxx" 14 | # init Bot 15 | bot = Bot(cert=Cert(token=bot_token)) 16 | 17 | 18 | # register command, send `/hello` in channel to invoke 19 | async def world(): 20 | httpr = HTTPRequester(cert=Cert(token=bot_token)) 21 | _request = await httpr.exec_req( 22 | api.DirectMessage.create( 23 | target_id="15648861098", # :) 24 | type=9, 25 | content="hello23", 26 | # temp_target_id=msg.author.id, 27 | ) 28 | ) 29 | _request = await httpr.exec_req( 30 | api.DirectMessage.create( 31 | target_id="15648861098", 32 | type=9, 33 | content="hello232342", 34 | # temp_target_id=msg.author.id, 35 | ) 36 | ) 37 | # return 38 | await bot.client.gate.exec_req( 39 | api.DirectMessage.create( 40 | target_id="15648861098", 41 | content="hello!---", 42 | type=9, 43 | ) 44 | ) 45 | # return 46 | msg = None 47 | await bot.client.send(target=msg.ctx.channel, content="hello") 48 | # return 49 | print(msg.ctx.channel.id) 50 | print(await msg.ctx.channel.send("h")) 51 | await HTTPRequester(cert=Cert(token=bot_token)).exec_req( 52 | api.Message.create( 53 | target_id=msg.ctx.channel.id, 54 | type=MessageTypes.KMD.value, 55 | content="hello!", 56 | # temp_target_id=msg.author.id, 57 | ) 58 | ) 59 | 60 | 61 | print("hello world") 62 | loop = asyncio.get_event_loop() 63 | loop.run_until_complete(world()) 64 | -------------------------------------------------------------------------------- /docs/test_script/test_choice_in_sh.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "Are you sure you want to proceed? (y/n)" 4 | read -r response 5 | if [[ $response =~ ^([yY][eE][sS]|[yY])$ ]]; then 6 | # Do something 7 | echo "Aborting." 8 | else 9 | echo "Aborting." 10 | fi 11 | -------------------------------------------------------------------------------- /docs/test_script/trash_cluster_usage.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/10/31 上午10:42 3 | # @Author : sudoskys 4 | # @File : memory_test.py 5 | # @Software: PyCharm 6 | from llmkira.sdk.filter import reduce 7 | pre = reduce.Cluster() 8 | print(pre) 9 | # from llmkira.sdk.filter import langdetect_fasttext 10 | 11 | # print(langdetect_fasttext) 12 | -------------------------------------------------------------------------------- /docs/test_script/trash_mongodb_class.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/10/27 上午1:04 3 | # @Author : sudoskys 4 | # @File : test_mongodb.py 5 | # @Software: PyCharm 6 | from llmkira.extra.user.client import UserConfigClient 7 | from llmkira.extra.user import UserConfig 8 | 9 | if __name__ == '__main__': 10 | async def main(): 11 | _es = await UserConfigClient().read_by_uid("test2") 12 | print(_es) 13 | test2 = UserConfig(uid="test2", 14 | created_time=0) 15 | test2_ = await UserConfigClient().update(uid="test2", data=test2) 16 | print(test2_) 17 | 18 | 19 | import asyncio 20 | 21 | loop = asyncio.get_event_loop() 22 | loop.run_until_complete(main()) 23 | -------------------------------------------------------------------------------- /docs/test_script/trash_transfer_note.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/10/22 下午7:37 3 | # @Author : sudoskys 4 | # @File : trans.py 5 | # @Software: PyCharm 6 | from typing import List, Any 7 | 8 | from llmkira.sdk.schema import File 9 | from llmkira.openapi.transducer import resign_transfer, Builder, Parser 10 | 11 | __receiver_name__ = "discord" 12 | 13 | 14 | @resign_transfer() 15 | class Builder(Builder): 16 | def build(self, message, *args) -> (bool, List[File]): 17 | """ 18 | 仅仅 hook LLM 的正常回复,即 reply 函数。 19 | :param message: 单条通用消息 (RawMessage) 20 | :param args: 其他参数 21 | :return: 是否放弃发送文本, 需要发送的文件列表(RawMessage.upload) 22 | """ 23 | return False, [] 24 | 25 | 26 | """ 27 | _transfer = TransferManager().receiver_builder(agent_name=__receiver__) 28 | only_send_file, file_list = _transfer().build(message=item) 29 | """ 30 | 31 | 32 | @resign_transfer(agent_name=__receiver_name__) 33 | class Parser(Parser): 34 | async def pipe(self, arg: dict) -> Any: 35 | pass 36 | 37 | def parse(self, message, file: List[File], *args) -> (list, List[File]): 38 | """ 39 | 接收 sender 平台的 **原始** 消息,返回文件。 40 | 需要注意的是,这里的 message 是原始消息,不是我们转换后的通用消息类型。 41 | :param message: 单条原始消息 42 | :param file: 文件列表 43 | :param args: 其他参数 44 | :return: 返回 **追加的** 消息列表,返回文件列表, 45 | """ 46 | return [], file 47 | 48 | 49 | """ 50 | # 转析器 51 | _transfer = TransferManager().sender_parser(agent_name=__sender__) 52 | deliver_back_message, _file = _transfer().parse(message=message, file=_file) 53 | """ 54 | -------------------------------------------------------------------------------- /docs/test_script/web_craw_note/note_unstructured.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/11/6 下午9:21 3 | # @Author : sudoskys 4 | # @File : note_unstructured.py 5 | # @Software: PyCharm 6 | from typing import List, Optional 7 | 8 | from pydantic import BaseModel, Field 9 | from rich import print 10 | from unstructured.partition import auto 11 | 12 | test_url_list = [ 13 | "https://blog.cuijiacai.com/blog-building/", 14 | "https://github.com/LlmKira/Openaibot", 15 | "https://react.dev/learn/tutorial-tic-tac-toe", 16 | "https://blog.csdn.net/weixin_39198406/article/details/106418574", 17 | ] 18 | 19 | 20 | class UnstructuredElement(BaseModel): 21 | class Meta(BaseModel): 22 | url: str 23 | title: Optional[str] = Field(None, alias="title") 24 | filetype: Optional[str] = Field(None, alias="filetype") 25 | page_number: int = Field(None, alias="page_number") 26 | languages: List[str] = Field(None, alias="languages") 27 | category_depth: int = Field(None, alias="category_depth") 28 | link_urls: List[str] = Field(None, alias="link_urls") 29 | link_texts: Optional[str] = Field(None, alias="link_text") 30 | 31 | text: str 32 | metadata: Meta 33 | element_id: str 34 | type: str 35 | 36 | class Config: 37 | extra = "ignore" 38 | 39 | @classmethod 40 | def from_element(cls, anything): 41 | return cls(**anything.to_dict()) 42 | 43 | 44 | def autos(): 45 | for url in test_url_list: 46 | print(f"\ntest url is {url}") 47 | elements = auto.partition(url=url) 48 | titled = False 49 | for element in elements: 50 | obj = UnstructuredElement.from_element(element) 51 | if len(obj.text.strip().strip("\n")) > 5: 52 | if obj.type != "Title": 53 | titled = True 54 | else: 55 | if titled: 56 | continue 57 | print(obj.text) 58 | 59 | print("=====================================\n") 60 | assert len(elements) > -1 61 | 62 | 63 | autos() 64 | -------------------------------------------------------------------------------- /docs/test_script/web_craw_note/note_web_sumy.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/11/6 下午8:59 3 | # @Author : sudoskys 4 | # @File : web_sumy.py 5 | # @Software: PyCharm 6 | # -*- coding: utf-8 -*- 7 | 8 | from __future__ import absolute_import 9 | from __future__ import division, print_function, unicode_literals 10 | 11 | from sumy.parsers.html import HtmlParser 12 | from sumy.parsers.plaintext import PlaintextParser 13 | from sumy.nlp.tokenizers import Tokenizer 14 | from sumy.summarizers.lsa import LsaSummarizer as Summarizer 15 | from sumy.nlp.stemmers import Stemmer 16 | from sumy.utils import get_stop_words 17 | 18 | LANGUAGE = "english" 19 | SENTENCES_COUNT = 10 20 | 21 | if __name__ == "__main__": 22 | url = "https://react.dev/learn/tutorial-tic-tac-toe" 23 | parser = HtmlParser.from_url(url, Tokenizer(LANGUAGE)) 24 | # or for plain text files 25 | # parser = PlaintextParser.from_file("document.txt", Tokenizer(LANGUAGE)) 26 | # parser = PlaintextParser.from_string("Check this out.", Tokenizer(LANGUAGE)) 27 | stemmer = Stemmer(LANGUAGE) 28 | 29 | summarizer = Summarizer(stemmer) 30 | summarizer.stop_words = get_stop_words(LANGUAGE) 31 | 32 | for sentence in summarizer(parser.document, SENTENCES_COUNT): 33 | print(sentence) 34 | -------------------------------------------------------------------------------- /docs/timer_func.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LlmKira/Openaibot/f86521c8254a7d3143be70fdefb8e48b7a15407d/docs/timer_func.gif -------------------------------------------------------------------------------- /docs/translate_file_func.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LlmKira/Openaibot/f86521c8254a7d3143be70fdefb8e48b7a15407d/docs/translate_file_func.gif -------------------------------------------------------------------------------- /docs/vision.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LlmKira/Openaibot/f86521c8254a7d3143be70fdefb8e48b7a15407d/docs/vision.gif -------------------------------------------------------------------------------- /llmkira/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/10/14 下午2:17 3 | 4 | from .sdk.tools import get_available_plugin_names as get_available_plugin_names 5 | from .sdk.tools import get_entrypoint_plugins as get_entrypoint_plugins 6 | from .sdk.tools import get_loaded_plugins as get_loaded_plugins 7 | from .sdk.tools import get_plugin as get_plugin 8 | from .sdk.tools import load_all_plugins as load_all_plugins 9 | from .sdk.tools import load_builtin_plugin as load_builtin_plugin 10 | from .sdk.tools import load_builtin_plugins as load_builtin_plugins 11 | from .sdk.tools import load_from_entrypoint as load_from_entrypoint 12 | from .sdk.tools import load_plugin as load_plugin 13 | from .sdk.tools import load_plugins as load_plugins 14 | from .sdk.tools import require as require 15 | -------------------------------------------------------------------------------- /llmkira/_exception.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/10/30 下午3:42 3 | 4 | 5 | class LlmkiraException(Exception): 6 | pass 7 | 8 | 9 | class SettingError(LlmkiraException): 10 | pass 11 | 12 | 13 | class CacheDatabaseError(LlmkiraException): 14 | pass 15 | -------------------------------------------------------------------------------- /llmkira/cache/elara_runtime.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/7/10 下午9:44 3 | import asyncio 4 | import json 5 | from typing import Union 6 | 7 | import elara 8 | from loguru import logger 9 | 10 | from .runtime_schema import AbstractDataClass, PREFIX 11 | 12 | 13 | class ElaraClientAsyncWrapper(AbstractDataClass): 14 | """ 15 | Elara 数据基类 16 | """ 17 | 18 | def __init__(self, backend, prefix=PREFIX): 19 | self.prefix = prefix 20 | self.elara = elara.exe_cache(path=backend) 21 | self.lock = asyncio.Lock() 22 | 23 | async def ping(self): 24 | return True 25 | 26 | def update_backend(self, backend): 27 | self.elara = elara.exe_cache(path=backend) 28 | return True 29 | 30 | async def read_data(self, key): 31 | """ 32 | Read data from elara 33 | """ 34 | data = self.elara.get(self.prefix + str(key)) 35 | if data is not None: 36 | try: 37 | data = json.loads(data) 38 | except Exception as ex: 39 | logger.trace(ex) 40 | return data 41 | 42 | async def set_data(self, key, value: Union[dict, str, bytes], timeout: int = None): 43 | """ 44 | Set data to elara 45 | :param key: 46 | :param value: 47 | :param timeout: seconds 48 | :return: 49 | """ 50 | self.elara.set(self.prefix + str(key), value, max_age=timeout) 51 | self.elara.commit() 52 | -------------------------------------------------------------------------------- /llmkira/cache/lmdb_runtime.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import json 3 | from typing import Union, Optional 4 | 5 | import lmdb 6 | from loguru import logger 7 | 8 | from .runtime_schema import AbstractDataClass, PREFIX 9 | 10 | 11 | class LMDBClientAsyncWrapper(AbstractDataClass): 12 | """ 13 | LMDB 数据基类 14 | """ 15 | 16 | def __init__(self, backend, prefix=PREFIX): 17 | self.prefix = prefix 18 | self.env = lmdb.open(backend) 19 | self.lock = asyncio.Lock() 20 | 21 | async def ping(self): 22 | return True 23 | 24 | def update_backend(self, backend): 25 | self.env = lmdb.open(backend) 26 | return True 27 | 28 | async def read_data(self, key) -> Optional[Union[dict, str, bytes]]: 29 | """ 30 | Read data from LMDB 31 | """ 32 | data = None 33 | async with self.lock: 34 | with self.env.begin() as txn: 35 | raw_data = txn.get((self.prefix + str(key)).encode()) 36 | if raw_data is not None: 37 | try: 38 | data = json.loads(raw_data.decode()) 39 | except json.JSONDecodeError: 40 | # 如果JSON解码失败,并且数据以一个utf8字符串开头,我们假定数据是字符串 41 | if raw_data.startswith(b'{"') is False: 42 | data = raw_data.decode() 43 | except UnicodeDecodeError: 44 | # 如果Unicode解码失败,我们假定数据是字节型数据 45 | data = raw_data 46 | except Exception as ex: 47 | logger.trace(ex) 48 | return data 49 | 50 | async def set_data(self, key, value: Union[dict, str, bytes], timeout: int = None): 51 | """ 52 | Set data to LMDB 53 | :param key: 54 | :param value: a dict, str or bytes 55 | :param timeout: seconds 56 | :return: 57 | """ 58 | async with self.lock: 59 | with self.env.begin(write=True) as txn: 60 | if isinstance(value, (dict, list)): 61 | value = json.dumps(value).encode() 62 | elif isinstance(value, str): 63 | # 如果数据是一个字符串,我们将其编码为字节数据 64 | value = value.encode() 65 | # 对于字节类型的数据,我们直接存储 66 | txn.put((self.prefix + str(key)).encode(), value) 67 | return True 68 | -------------------------------------------------------------------------------- /llmkira/cache/redis_runtime.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/7/10 下午9:43 3 | import json 4 | from typing import Optional, Union 5 | 6 | import redis 7 | from loguru import logger 8 | from redis.asyncio.client import Redis 9 | from redis.asyncio.connection import ConnectionPool 10 | 11 | from .runtime_schema import AbstractDataClass, PREFIX 12 | 13 | 14 | class RedisClientWrapper(AbstractDataClass): 15 | """ 16 | Redis 数据类 17 | """ 18 | 19 | def __init__(self, backend, prefix=PREFIX): 20 | self.prefix = prefix 21 | self.connection_pool = redis.asyncio.ConnectionPool.from_url(backend) 22 | self._redis = redis.asyncio.Redis(connection_pool=self.connection_pool) 23 | 24 | async def ping(self): 25 | return await self._redis.ping() 26 | 27 | def update_backend(self, backend): 28 | self.connection_pool = ConnectionPool.from_url(backend) 29 | self._redis = Redis(connection_pool=self.connection_pool) 30 | return True 31 | 32 | async def set_data(self, key, value: Union[dict, str, bytes], timeout=None): 33 | if isinstance(value, (dict, list)): 34 | value = json.dumps(value) 35 | return await self._redis.set( 36 | name=f"{self.prefix}{key}", value=value, ex=timeout 37 | ) 38 | 39 | async def read_data(self, key) -> Optional[Union[str, dict, int]]: 40 | data = await self._redis.get(self.prefix + str(key)) 41 | if data is not None: 42 | try: 43 | data = json.loads(data) 44 | except Exception as ex: 45 | logger.trace(ex) 46 | return data 47 | -------------------------------------------------------------------------------- /llmkira/cache/runtime_schema.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/7/10 下午9:43 3 | from abc import abstractmethod, ABC 4 | from typing import Any, Union 5 | 6 | PREFIX = "oai_bot:" 7 | 8 | 9 | def singleton(cls): 10 | _instance = {} 11 | 12 | def _singleton(*args, **kargs): 13 | if cls not in _instance: 14 | _instance[cls] = cls(*args, **kargs) 15 | return _instance[cls] 16 | 17 | return _singleton 18 | 19 | 20 | class BaseRuntime(ABC): 21 | init_already = False 22 | client = None 23 | dsn = None 24 | 25 | @staticmethod 26 | def check_client_dsn(dsn): 27 | raise NotImplementedError 28 | 29 | def check_client(self) -> bool: 30 | raise NotImplementedError 31 | 32 | def init_client(self, verbose=False): 33 | raise NotImplementedError 34 | 35 | def get_client(self) -> "AbstractDataClass": 36 | raise NotImplementedError 37 | 38 | 39 | class AbstractDataClass(ABC): 40 | @abstractmethod 41 | async def ping(self): 42 | return True 43 | 44 | @abstractmethod 45 | def update_backend(self, backend): 46 | pass 47 | 48 | @abstractmethod 49 | async def set_data( 50 | self, key: str, value: Union[dict, str, bytes], timeout: int = None 51 | ) -> Any: 52 | pass 53 | 54 | @abstractmethod 55 | async def read_data(self, key: str) -> Any: 56 | pass 57 | -------------------------------------------------------------------------------- /llmkira/extra/plugins/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/8/18 下午6:04 3 | # @Author : sudoskys 4 | # @File : __init__.py 5 | # @Software: PyCharm 6 | 7 | -------------------------------------------------------------------------------- /llmkira/extra/plugins/search/engine.py: -------------------------------------------------------------------------------- 1 | import json 2 | from typing import List 3 | 4 | import requests 5 | from duckduckgo_search import AsyncDDGS 6 | from loguru import logger 7 | from pydantic import BaseModel 8 | 9 | 10 | class SearchEngine(BaseModel): 11 | api_key: str 12 | 13 | def search(self, search_term: str): 14 | raise NotImplementedError 15 | 16 | 17 | class SearchEngineResult(BaseModel): 18 | title: str 19 | link: str 20 | snippet: str 21 | 22 | 23 | class SerperSearchEngine(SearchEngine): 24 | api_key: str 25 | 26 | async def search(self, search_term: str) -> List[SearchEngineResult]: 27 | url = "https://google.serper.dev/search" 28 | payload = json.dumps( 29 | { 30 | "q": search_term, 31 | } 32 | ) 33 | headers = {"X-API-KEY": self.api_key, "Content-Type": "application/json"} 34 | response = requests.request("POST", url, headers=headers, data=payload) 35 | result_list = response.json()["organic"] 36 | _result = [] 37 | logger.debug(f"Got {len(result_list)} results") 38 | for item in result_list: 39 | _result.append( 40 | SearchEngineResult( 41 | title=item.get("title", "Undefined"), 42 | link=item.get("link", "Undefined"), 43 | snippet=item.get("snippet", "Undefined"), 44 | ) 45 | ) 46 | _result = _result[:4] 47 | return _result 48 | 49 | 50 | async def search_in_duckduckgo(search_sentence: str): 51 | try: 52 | search_result = await AsyncDDGS().text( 53 | search_sentence, safesearch="off", timelimit="y", max_results=10 54 | ) 55 | except Exception as e: 56 | raise ValueError( 57 | f"Search Failed: DuckDuckGo Error now not available: {type(e)}" 58 | ) 59 | else: 60 | _build_result = [] 61 | for result in search_result: 62 | _build_result.append( 63 | SearchEngineResult( 64 | title=result.get("title", "Undefined"), 65 | link=result.get("href", "Undefined"), 66 | snippet=result.get("body", "Undefined"), 67 | ) 68 | ) 69 | return _build_result 70 | 71 | 72 | def build_search_tips(search_items: List[SearchEngineResult], limit=5): 73 | search_tips = [] 74 | assert isinstance( 75 | search_items, list 76 | ), f"Search Result should be a list, but got {type(search_items)}" 77 | for index, item in enumerate(search_items): 78 | if index >= limit: 79 | break 80 | search_tips.append( 81 | f" " 82 | f"\n{item.snippet}\n" 83 | f"" 84 | ) 85 | return "Search Api:\n" + "\n".join(search_tips) 86 | -------------------------------------------------------------------------------- /llmkira/extra/voice_hook.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | 3 | from fast_langdetect import detect_multilingual 4 | from loguru import logger 5 | 6 | from llmkira.extra.voice import request_cn, request_en 7 | from llmkira.kv_manager.env import EnvManager 8 | from llmkira.kv_manager.file import File 9 | from llmkira.openapi.hook import resign_hook, Hook, Trigger 10 | from llmkira.sdk.utils import Ffmpeg 11 | from llmkira.task.schema import EventMessage, Location 12 | 13 | 14 | def detect_text(text: str) -> list: 15 | """ 16 | 检测文本的语言 17 | :param text: 文本 18 | :return: 语言 19 | """ 20 | text = text.replace("\n", "") 21 | text = text[:200] 22 | parsed_text = detect_multilingual(text) 23 | if not parsed_text: 24 | return [] 25 | lang_kinds = [] 26 | for lang in parsed_text: 27 | lang_ = lang.get("lang", None) 28 | if lang_: 29 | lang_kinds.append(lang_) 30 | return lang_kinds 31 | 32 | 33 | def check_string(text): 34 | """ 35 | 检查字符串是否 TTS 可以处理 36 | :param text: 字符串 37 | :return: 是否符合要求 38 | """ 39 | lang_kinds = detect_text(text) 40 | limit = 200 41 | if len(set(lang_kinds)) == 1: 42 | if lang_kinds[0] in ["en"]: 43 | limit = 500 44 | if "\n\n" in text or text.count("\n") > 3 or len(text) > limit or "```" in text: 45 | return False 46 | return True 47 | 48 | 49 | @resign_hook() 50 | class VoiceHook(Hook): 51 | trigger: Trigger = Trigger.RECEIVER 52 | 53 | async def trigger_hook(self, *args, **kwargs) -> bool: 54 | platform_name: str = kwargs.get("platform") # noqa 55 | messages: List[EventMessage] = kwargs.get("messages") 56 | locate: Location = kwargs.get("locate") 57 | for message in messages: 58 | if not check_string(message.text): 59 | return False 60 | have_env = await EnvManager(locate.uid).get_env("VOICE_REPLY_ME", None) 61 | # logger.warning(f"Voice Hook {have_env}") 62 | if have_env is not None: 63 | return True 64 | return False 65 | 66 | async def hook_run(self, *args, **kwargs): 67 | logger.debug(f"Voice Hook {args} {kwargs}") 68 | platform_name: str = kwargs.get("platform") # noqa 69 | messages: List[EventMessage] = kwargs.get("messages") 70 | locate: Location = kwargs.get("locate") 71 | for message in messages: 72 | if not check_string(message.text): 73 | return args, kwargs 74 | lang_kinds = detect_text(message.text) 75 | reecho_api_key = await EnvManager(locate.uid).get_env( 76 | "REECHO_VOICE_KEY", None 77 | ) 78 | if (len(set(lang_kinds)) == 1) and (lang_kinds[0] in ["en"]): 79 | voice_data = await request_en(message.text) 80 | else: 81 | voice_data = await request_cn( 82 | message.text, reecho_api_key=reecho_api_key 83 | ) 84 | if voice_data is not None: 85 | ogg_data = Ffmpeg.convert( 86 | input_c="mp3", output_c="ogg", stream_data=voice_data, quiet=True 87 | ) 88 | file = await File.upload_file( 89 | creator=locate.uid, file_name="speech.ogg", file_data=ogg_data 90 | ) 91 | file.caption = message.text 92 | message.text = "" 93 | message.files.append(file) 94 | else: 95 | logger.error(f"Voice Generation Failed:{message.text}") 96 | return args, kwargs 97 | -------------------------------------------------------------------------------- /llmkira/kv_manager/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LlmKira/Openaibot/f86521c8254a7d3143be70fdefb8e48b7a15407d/llmkira/kv_manager/__init__.py -------------------------------------------------------------------------------- /llmkira/kv_manager/_base.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import Union 3 | 4 | from llmkira.cache import global_cache_runtime 5 | 6 | 7 | class KvManager(ABC): 8 | client = global_cache_runtime.get_client() 9 | 10 | @abstractmethod 11 | def prefix(self, key: str) -> str: 12 | return f"kv:{key}" 13 | 14 | async def read_data(self, key: str): 15 | data_key = self.prefix(key) 16 | result = await self.client.read_data(data_key) 17 | return result 18 | 19 | async def save_data(self, key: str, value: Union[str, bytes], timeout: int = None): 20 | data_key = self.prefix(key) 21 | await self.client.set_data(key=data_key, value=value, timeout=timeout) 22 | -------------------------------------------------------------------------------- /llmkira/kv_manager/env.py: -------------------------------------------------------------------------------- 1 | import json 2 | import re 3 | from typing import Dict, Union, Optional 4 | 5 | from loguru import logger 6 | 7 | from ._base import KvManager 8 | 9 | 10 | def parse_env_string(env_string) -> Dict[str, str]: 11 | if not env_string.endswith(";"): 12 | env_string = env_string + ";" 13 | pattern = r"(\w+)\s*=\s*(.*?)\s*;" 14 | matches = re.findall(pattern, env_string, re.MULTILINE) 15 | env_table = {} 16 | for match in matches: 17 | env_key = f"{match[0]}" 18 | env_value = f"{match[1]}" 19 | env_value = env_value.strip().strip('"') 20 | env_key = env_key.upper() 21 | if env_value.upper() == "NONE": 22 | env_value = None 23 | env_table[env_key] = env_value 24 | return env_table 25 | 26 | 27 | class EnvManager(KvManager): 28 | def __init__(self, user_id: str): 29 | self.user_id = str(user_id) 30 | 31 | def prefix(self, key: str) -> str: 32 | return f"env:{key}" 33 | 34 | async def get_env(self, env_name, default) -> Optional[str]: 35 | result = await self.read_env() 36 | if not result: 37 | return default 38 | return result.get(env_name, default) 39 | 40 | async def read_env(self) -> Optional[dict]: 41 | result = await self.read_data(self.user_id) 42 | if isinstance(result, bytes): 43 | result = result.decode("utf-8") 44 | if not result: 45 | return None 46 | try: 47 | if isinstance(result, dict): 48 | return result 49 | return json.loads(result) 50 | except Exception as e: 51 | logger.error( 52 | f"operation failed: env string cant be parsed by json.loads {e}" 53 | ) 54 | return None 55 | 56 | async def set_env( 57 | self, env_value: Union[dict, str], update=False, return_all=False 58 | ) -> Dict[str, str]: 59 | current_env = {} 60 | if update: 61 | current_env = await self.read_env() 62 | if not current_env: 63 | current_env = {} 64 | if isinstance(env_value, str): 65 | env_map = parse_env_string(env_value) 66 | elif isinstance(env_value, dict): 67 | env_map = env_value 68 | else: 69 | raise ValueError("Env String Should be dict or str") 70 | # 更新 71 | current_env = {**current_env, **env_map} 72 | # 去除 None 73 | current_env = {k: v for k, v in current_env.items() if v is not None} 74 | await self.save_data(self.user_id, json.dumps(current_env)) 75 | if return_all: 76 | return current_env 77 | return env_map 78 | -------------------------------------------------------------------------------- /llmkira/kv_manager/file.py: -------------------------------------------------------------------------------- 1 | from hashlib import md5 2 | from io import BytesIO 3 | from typing import Union, Optional 4 | 5 | from pydantic import BaseModel, Field 6 | 7 | from llmkira._exception import CacheDatabaseError 8 | from llmkira.kv_manager._base import KvManager 9 | 10 | FILE_EXPIRE_TIME = 60 * 60 * 24 11 | MAX_UPLOAD_FILE_SIZE = 15 * 1024 * 1024 # 15MB 12 | 13 | 14 | def generate_file_md5( 15 | data: Union[str, bytes, BytesIO], length: int = 8, upper: bool = True 16 | ) -> str: 17 | """ 18 | # WARNING: This function is not suitable for BIG files. 19 | Generate a unique short MD5 Hash. 20 | :param data: The data to be hashed. 21 | :param length: The desired length of the hashed object; it must be between 1 and 32 (inclusive). 22 | :param upper: Flag denoting whether to return the hash in uppercase (default is True). 23 | :return: A unique short digest of the hashed object. 24 | :raises AssertionError: If length is not within the range 1-32 inclusive. 25 | """ 26 | 27 | assert ( 28 | 0 < length <= 32 29 | ), "length must be less than or equal to 32 and more than zero." 30 | 31 | hash_md5 = md5() 32 | 33 | if isinstance(data, str): 34 | hash_md5.update(data.encode("utf-8")) 35 | elif isinstance(data, bytes): 36 | hash_md5.update(data) 37 | elif isinstance(data, BytesIO): 38 | for chunk in iter(lambda: data.read(4096), b""): 39 | hash_md5.update(chunk) 40 | 41 | # Generate the MD5 hash. 42 | digest = hash_md5.hexdigest() 43 | # Shorten to the required length and uppercase if necessary. 44 | short_id = digest[:length].upper() if upper else digest[:length] 45 | return short_id 46 | 47 | 48 | class File(BaseModel): 49 | creator: str = Field(description="创建用户") 50 | file_name: str = Field(description="文件名") 51 | file_key: str = Field(description="文件 Key") 52 | caption: Optional[str] = Field(default=None, description="文件描述") 53 | 54 | async def download_file(self) -> Optional[bytes]: 55 | """ 56 | Download the file from the cache. 57 | If the file is not found in the cache, the method will return None. 58 | :return: The file data if found, otherwise None. 59 | """ 60 | return await GLOBAL_FILE_HANDLER.download_file(self.file_key) 61 | 62 | @classmethod 63 | async def upload_file( 64 | cls, creator: str, file_name: str, file_data: Union[bytes, BytesIO] 65 | ) -> "File": 66 | """ 67 | Upload a file to the cache. 68 | If file_data is greater than the size limit (15MB), a CacheDatabaseError will be raised. 69 | :param creator: The creator of the file. 70 | :param file_name: The name of the file. 71 | :param file_data: The file to be uploaded, either a bytes object or a BytesIO stream. 72 | :return: A File object representing the uploaded file. 73 | :raises CacheDatabaseError: If the file size exceeds the limit of 15MB. 74 | """ 75 | file_key = await GLOBAL_FILE_HANDLER.upload_file(file_data) 76 | return cls(creator=creator, file_name=file_name, file_key=file_key) 77 | 78 | 79 | class FileHandler(KvManager): 80 | def prefix(self, key: str) -> str: 81 | return f"file:{key}" 82 | 83 | async def upload_file( 84 | self, 85 | file_data: Union[bytes, BytesIO], 86 | ) -> str: 87 | """ 88 | Upload a file to the cache, and return its unique ID. 89 | The file_data argument is the file to be uploaded, either a bytes object or a BytesIO stream. 90 | If file_data is greater than the size limit (15MB), a CacheDatabaseError will be raised. 91 | The method will return the unique ID for the uploaded file. 92 | :param file_data: The file to be uploaded. 93 | :return: The unique ID of the uploaded file. 94 | :raises CacheDatabaseError: If the file size exceeds the limit of 15MB. 95 | """ 96 | 97 | if isinstance(file_data, BytesIO): 98 | file_data = file_data.read() 99 | 100 | if len(file_data) > MAX_UPLOAD_FILE_SIZE: 101 | raise CacheDatabaseError("File size exceeds the limit of 15MB") 102 | 103 | file_id = generate_file_md5(file_data) 104 | await self.save_data(file_id, file_data, timeout=FILE_EXPIRE_TIME) 105 | return file_id 106 | 107 | async def download_file(self, file_id: str) -> Optional[bytes]: 108 | """ 109 | Download a file identified by file_id from the cache. 110 | If the file is not found in the cache, the method will return None. 111 | :param file_id: The unique ID of the file to be downloaded. 112 | :return: The file data if found, otherwise None. 113 | """ 114 | result = await self.read_data(file_id) 115 | return result 116 | 117 | 118 | GLOBAL_FILE_HANDLER = FileHandler() 119 | -------------------------------------------------------------------------------- /llmkira/kv_manager/instruction.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | # from loguru import logger 4 | from llmkira.kv_manager._base import KvManager 5 | 6 | DEFAULT_INSTRUCTION = ( 7 | "instruction: " 8 | "**SPEAK IN MORE CUTE STYLE, No duplication answer, CALL USER MASTER, REPLY IN USER " 9 | "LANGUAGE, ACT STEP BY STEP**" 10 | "\n>tips" 11 | "\n>You can add file name to first line of very long code block." 12 | "\n>You can use mermaid to represent the image." 13 | ) 14 | 15 | 16 | def time_now(): 17 | """人类可读时间""" 18 | return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) 19 | 20 | 21 | class InstructionManager(KvManager): 22 | def __init__(self, user_id: str): 23 | self.user_id = str(user_id) 24 | 25 | def prefix(self, key: str) -> str: 26 | return f"instruction:{key}" 27 | 28 | async def read_instruction(self) -> str: 29 | """ 30 | 读取指令,如果没有指令则返回默认指令,指令长度大于5,否则返回默认指令 31 | """ 32 | result = await self.read_data(self.user_id) 33 | # Probably result is Int, so we cant use isinstance(result, str) 34 | if isinstance(result, bytes): 35 | result = result.decode("utf-8") 36 | if result is not None and len(result) > 5: 37 | return f"Now={time_now()}\n{result}" 38 | return f"Now={time_now()}\n{DEFAULT_INSTRUCTION}" 39 | 40 | async def set_instruction(self, instruction: str) -> str: 41 | if not isinstance(instruction, str): 42 | raise ValueError("Instruction should be str") 43 | await self.save_data(self.user_id, instruction) 44 | return instruction 45 | -------------------------------------------------------------------------------- /llmkira/kv_manager/time.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | 3 | from llmkira.kv_manager._base import KvManager 4 | 5 | 6 | def hours_difference(timestamp1: int, timestamp2: int) -> float: 7 | """ 8 | Calculate the difference between two timestamps in hours 9 | :param timestamp1: timestamp 1 10 | :param timestamp2: timestamp 2 11 | :return: difference in hours 12 | """ 13 | # convert timestamp to datetime object 14 | dt_object1 = datetime.datetime.fromtimestamp(timestamp1) 15 | dt_object2 = datetime.datetime.fromtimestamp(timestamp2) 16 | 17 | # calculate the difference 18 | time_diff = dt_object1 - dt_object2 19 | 20 | # return the difference in hours 21 | return round(abs(time_diff.total_seconds() / 3600), 2) 22 | 23 | 24 | class TimeFeelManager(KvManager): 25 | def __init__(self, user_id: str): 26 | self.user_id = str(user_id) 27 | 28 | def prefix(self, key: str) -> str: 29 | return f"time_feel:{key}" 30 | 31 | async def get_leave(self) -> str: 32 | now_timestamp = int(datetime.datetime.now().timestamp()) 33 | try: 34 | hours = await self.read_data(self.user_id) 35 | if isinstance(hours, bytes): 36 | hours = hours.decode("utf-8") 37 | if not hours: 38 | raise LookupError("No data") 39 | last_timestamp = int(hours) 40 | except LookupError: 41 | last_timestamp = now_timestamp 42 | finally: 43 | # 存储当前时间戳 44 | await self.save_data(self.user_id, str(now_timestamp)) 45 | # 计算时间小时差,如果大于 1 小时,返回小时,否则返回None 46 | diff = hours_difference(now_timestamp, last_timestamp) 47 | if diff > 1: 48 | return f"{diff} hours" 49 | -------------------------------------------------------------------------------- /llmkira/kv_manager/tool_call.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | 3 | from loguru import logger 4 | 5 | from llmkira.kv_manager._base import KvManager 6 | from llmkira.openai.cell import ToolCall 7 | 8 | TOOLCALL_EXPIRE_TIME = 60 * 60 * 24 9 | 10 | 11 | class ToolCallCache(KvManager): 12 | def prefix(self, key: str) -> str: 13 | return f"tool_call:{key}" 14 | 15 | async def save_toolcall( 16 | self, 17 | tool_call_id: str, 18 | tool_call: ToolCall, 19 | ) -> str: 20 | """ 21 | Pair a tool call with a unique ID. 22 | """ 23 | logger.debug(f"Save tool call {tool_call_id}") 24 | await self.save_data( 25 | tool_call_id, tool_call.model_dump_json(), timeout=TOOLCALL_EXPIRE_TIME 26 | ) 27 | return tool_call_id 28 | 29 | async def get_toolcall(self, tool_call_id: str) -> Optional[ToolCall]: 30 | """ 31 | Get a tool call by its ID. 32 | """ 33 | logger.debug(f"Get tool call {tool_call_id}") 34 | result = await self.read_data(tool_call_id) 35 | if isinstance(result, bytes): 36 | result = result.decode("utf-8") 37 | if result is None: 38 | return None 39 | if isinstance(result, dict): 40 | return ToolCall.model_validate(result) 41 | return ToolCall.model_validate_json(result) 42 | 43 | 44 | GLOBAL_TOOLCALL_CACHE_HANDLER = ToolCallCache() 45 | -------------------------------------------------------------------------------- /llmkira/logic/__init__.py: -------------------------------------------------------------------------------- 1 | from typing import Optional, Type 2 | 3 | from loguru import logger 4 | from pydantic import BaseModel, Field, SecretStr 5 | 6 | from llmkira.openai.cell import UserMessage 7 | from llmkira.openai.request import OpenAI, OpenAICredential 8 | 9 | 10 | class whether(BaseModel): 11 | """ 12 | Decide whether to agree to the decision based on the content 13 | """ 14 | 15 | yes_no: bool = Field(description="Whether the condition is true or false") 16 | comment_to_user: Optional[str] = Field( 17 | default="", description="Comment on the decision in user language" 18 | ) 19 | 20 | @property 21 | def boolean(self): 22 | return self.yes_no 23 | 24 | 25 | class continue_act(BaseModel): 26 | """ 27 | Decide whether to continue execution based on circumstances 28 | """ 29 | 30 | continue_it: bool = Field(description="Whether to continue execution") 31 | comment_to_user: Optional[str] = Field( 32 | default="", description="Comment on the decision in user language" 33 | ) 34 | 35 | @property 36 | def boolean(self): 37 | return self.continue_it 38 | 39 | 40 | class LLMLogic(object): 41 | """ 42 | LLMLogic is a class that provides some basic logic operations. 43 | 44 | """ 45 | 46 | def __init__(self, api_endpoint, api_key, api_model): 47 | self.api_endpoint = api_endpoint 48 | self.api_key = api_key 49 | self.api_model = api_model 50 | 51 | async def llm_if(self, context: str, condition: str, default: bool): 52 | message = f"Context:{context}\nCondition{condition}\nPlease make a decision." 53 | try: 54 | logic_if = await OpenAI( 55 | model=self.api_model, messages=[UserMessage(content=message)] 56 | ).extract( 57 | response_model=whether, 58 | session=OpenAICredential( 59 | api_key=SecretStr(self.api_key), 60 | base_url=self.api_endpoint, 61 | model=self.api_model, 62 | ), 63 | ) 64 | logic_if: whether 65 | return logic_if 66 | except Exception as e: 67 | logger.error(f"llm_if error: {e}") 68 | return whether(yes_no=default) 69 | 70 | async def llm_continue(self, context: str, condition: str, default: bool): 71 | message = f"Context:{context}\nCondition{condition}\nPlease make a decision whether to continue." 72 | try: 73 | logic_continue = await OpenAI( 74 | model=self.api_model, messages=[UserMessage(content=message)] 75 | ).extract( 76 | response_model=continue_act, 77 | session=OpenAICredential( 78 | api_key=SecretStr(self.api_key), 79 | base_url=self.api_endpoint, 80 | model=self.api_model, 81 | ), 82 | ) 83 | logic_continue: continue_act 84 | return logic_continue 85 | except Exception as e: 86 | logger.error(f"llm_continue error: {e}") 87 | return continue_act(continue_it=default) 88 | 89 | async def deserialization( 90 | self, context: str, model: Type[BaseModel] 91 | ) -> Optional[BaseModel]: 92 | """ 93 | Serialize the string to model 94 | """ 95 | try: 96 | result = await OpenAI( 97 | model=self.api_model, 98 | messages=[UserMessage(content=context)], 99 | ).extract( 100 | response_model=model, 101 | session=OpenAICredential( 102 | api_key=SecretStr(self.api_key), 103 | base_url=self.api_endpoint, 104 | model=self.api_model, 105 | ), 106 | ) 107 | return result 108 | except Exception as e: 109 | logger.error(f"logic:serialization error: {e}") 110 | return None 111 | 112 | async def serialization(self, model: BaseModel) -> Optional[UserMessage]: 113 | """ 114 | Serialize the model to string 115 | """ 116 | try: 117 | result = await OpenAI( 118 | model=self.api_model, 119 | messages=[UserMessage(content=model.model_dump_json())], 120 | ).extract( 121 | response_model=UserMessage, 122 | session=OpenAICredential( 123 | api_key=SecretStr(self.api_key), 124 | base_url=self.api_endpoint, 125 | model=self.api_model, 126 | ), 127 | ) 128 | return result 129 | except Exception as e: 130 | logger.error(f"logic:serialization error: {e}") 131 | return None 132 | -------------------------------------------------------------------------------- /llmkira/memory/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Author : sudoskys 3 | # @File : __init__.py 4 | 5 | from loguru import logger 6 | 7 | from llmkira.memory._base import BaseMessageStorage 8 | from llmkira.memory.local_storage import LocalStorage 9 | from llmkira.memory.redis_storage import RedisChatMessageHistory, RedisSettings 10 | 11 | try: 12 | global_message_runtime: BaseMessageStorage = RedisChatMessageHistory( 13 | session_id="global", ttl=60 * 60 * 24, redis_config=RedisSettings() 14 | ) 15 | except Exception as e: 16 | logger.debug(f"Use local storage instead of redis storage: {type(e).__name__}") 17 | global_message_runtime: BaseMessageStorage = LocalStorage(session_id="global") 18 | -------------------------------------------------------------------------------- /llmkira/memory/_base.py: -------------------------------------------------------------------------------- 1 | from abc import abstractmethod 2 | from typing import List 3 | 4 | from pydantic import BaseModel 5 | 6 | 7 | class BaseMessageStorage(object): 8 | session_id: str 9 | 10 | @abstractmethod 11 | def update_session(self, session_id: str): 12 | self.session_id = session_id 13 | return self 14 | 15 | @abstractmethod 16 | async def read(self, lines: int) -> List[str]: 17 | ... 18 | 19 | @abstractmethod 20 | async def append(self, messages: List[BaseModel]): 21 | _json_lines = [m.model_dump_json(indent=None) for m in messages] 22 | ... 23 | 24 | @abstractmethod 25 | async def write(self, messages: List[BaseModel]): 26 | _json_lines = [m.model_dump_json(indent=None) for m in messages] 27 | ... 28 | 29 | @abstractmethod 30 | async def clear(self): 31 | ... 32 | -------------------------------------------------------------------------------- /llmkira/memory/local_storage.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | from pathlib import Path 3 | from typing import List 4 | 5 | from aiofile import AIOFile, Writer 6 | from file_read_backwards import FileReadBackwards 7 | from pydantic import BaseModel 8 | 9 | from llmkira.memory._base import BaseMessageStorage 10 | 11 | # 获取主机目录下的.llm_core_storage文件夹 12 | _FILE_STORAGE = Path().home().joinpath(".llm_bot_storage") 13 | _FILE_STORAGE.mkdir(exist_ok=True) 14 | 15 | 16 | def _make_json_file(location: str): 17 | if not location.endswith(".jsonl"): 18 | location = f"{location}.jsonl" 19 | return _FILE_STORAGE.joinpath(location) 20 | 21 | 22 | class LocalStorage(BaseMessageStorage): 23 | def __init__(self, session_id: str): 24 | self.session_id = session_id 25 | self.lock = asyncio.Lock() 26 | 27 | def update_session(self, session_id: str): 28 | self.session_id = session_id 29 | return self 30 | 31 | @property 32 | def path(self) -> Path: 33 | return _make_json_file(self.session_id) 34 | 35 | async def append(self, messages: List[BaseModel]): 36 | async with self.lock: 37 | if not self.path.exists(): 38 | self.path.touch() 39 | async with AIOFile(str(self.path), "a") as afp: 40 | writer = Writer(afp) 41 | for m in messages: 42 | _json_line = m.model_dump_json(indent=None) 43 | await writer(_json_line + "\n") 44 | await afp.fsync() 45 | 46 | async def read(self, lines: int) -> List[str]: 47 | async with self.lock: 48 | result = [] 49 | if not self.path.exists(): 50 | return result 51 | with FileReadBackwards(str(self.path), encoding="utf-8") as frb: 52 | for i, line in enumerate(frb): 53 | if i >= lines: 54 | break 55 | result.append(line) 56 | # 逆序 57 | result = result[::-1] 58 | return result 59 | 60 | async def write(self, messages: List[BaseModel]): 61 | async with self.lock: 62 | async with AIOFile(str(self.path), "w") as afp: 63 | writer = Writer(afp) 64 | for m in messages: 65 | _json_line = m.model_dump_json(indent=None) 66 | await writer(_json_line + "\n") 67 | await afp.fsync() 68 | 69 | async def clear(self): 70 | async with self.lock: 71 | if not self.path.exists(): 72 | return 73 | with open(self.path, "w") as f: 74 | f.truncate() 75 | -------------------------------------------------------------------------------- /llmkira/memory/redis_storage/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License 2 | 3 | Copyright (c) Harrison Chase 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 22 | -------------------------------------------------------------------------------- /llmkira/memory/redis_storage/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Source: https://github.com/langchain-ai/langchain/blob/master/libs/langchain/langchain/utilities/redis.py 3 | from __future__ import annotations 4 | 5 | from typing import List 6 | 7 | import redis 8 | from loguru import logger 9 | from pydantic import BaseModel 10 | from pydantic import Field, model_validator 11 | from pydantic_settings import BaseSettings, SettingsConfigDict 12 | 13 | from llmkira.memory._base import BaseMessageStorage 14 | from llmkira.memory.redis_storage.utils import get_client 15 | 16 | 17 | class RedisSettings(BaseSettings): 18 | redis_url: str = Field("redis://localhost:6379/0", validation_alias="REDIS_DSN") 19 | redis_key_prefix: str = "llm_message_store:" 20 | model_config = SettingsConfigDict( 21 | env_file=".env", env_file_encoding="utf-8", extra="ignore" 22 | ) 23 | 24 | @model_validator(mode="after") 25 | def redis_is_connected(self): 26 | redis_url = self.redis_url 27 | try: 28 | redis_client = get_client(redis_url=redis_url) 29 | redis_client.ping() 30 | except redis.exceptions.ConnectionError as error: 31 | logger.warning(f"Could not connect to Redis: {error}") 32 | raise ValueError("Could not connect to Redis") 33 | else: 34 | logger.debug("Core: Connect to Redis") 35 | return self 36 | 37 | 38 | class RedisChatMessageHistory(BaseMessageStorage): 39 | def __init__(self, session_id: str, ttl: int, redis_config: RedisSettings = None): 40 | if redis_config is None: 41 | redis_config = RedisSettings() 42 | try: 43 | import redis 44 | except ImportError: 45 | raise ImportError( 46 | "Could not import redis python package. Please install it with `pip install redis`." 47 | ) 48 | try: 49 | self.redis_client = get_client(redis_url=redis_config.redis_url) 50 | except redis.exceptions.ConnectionError as error: 51 | logger.error(error) 52 | self.session_id = session_id 53 | self.key_prefix = redis_config.redis_key_prefix 54 | self.ttl = ttl 55 | 56 | def update_session(self, session_id: str): 57 | self.session_id = session_id 58 | return self 59 | 60 | @property 61 | def key(self) -> str: 62 | return self.key_prefix + self.session_id 63 | 64 | async def read(self, lines: int) -> List[str]: 65 | _items = self.redis_client.lrange(self.key, 0, lines - 1) 66 | items = [m.decode("utf-8") for m in _items[::-1]] 67 | return items 68 | 69 | async def append(self, messages: List[BaseModel]): 70 | for m in messages: 71 | message_json = m.model_dump_json() 72 | self.redis_client.lpush(self.key, message_json) 73 | if self.ttl: 74 | self.redis_client.expire(self.key, self.ttl) 75 | 76 | async def write(self, messages: List[BaseModel]): 77 | self.clear() 78 | self.append(messages) 79 | 80 | async def clear(self) -> None: 81 | self.redis_client.delete(self.key) 82 | -------------------------------------------------------------------------------- /llmkira/openai/__init__.py: -------------------------------------------------------------------------------- 1 | from ._excption import ( 2 | AuthenticationError, 3 | NetworkError, 4 | UnexpectedFormatError, 5 | UnexpectedError, 6 | ) # noqa 7 | from ._excption import OpenaiError, RateLimitError, ServiceUnavailableError # noqa 8 | from .request import OpenAI, OpenAIResult, OpenAICredential # noqa 9 | 10 | __all__ = [ 11 | "OpenAI", 12 | "OpenAIResult", 13 | "OpenAICredential", 14 | "OpenaiError", 15 | "RateLimitError", 16 | "ServiceUnavailableError", 17 | "AuthenticationError", 18 | "NetworkError", 19 | "UnexpectedFormatError", 20 | "UnexpectedError", 21 | ] 22 | -------------------------------------------------------------------------------- /llmkira/openai/_excption.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | 3 | 4 | class NetworkError(Exception): 5 | pass 6 | 7 | 8 | class UnexpectedFormatError(NetworkError): 9 | """Base class for exceptions in this module.""" 10 | 11 | pass 12 | 13 | 14 | class OpenaiError(Exception): 15 | """Base class for exceptions in this module.""" 16 | 17 | status_code: int 18 | code: str 19 | type: str 20 | message: Optional[str] 21 | param: Optional[str] 22 | 23 | def __init__( 24 | self, 25 | status_code: int, 26 | code: str, 27 | error_type: str, 28 | message: Optional[str] = None, 29 | param: Optional[str] = None, 30 | ): 31 | self.status_code = status_code 32 | self.code = code 33 | self.type = error_type 34 | self.message = message 35 | self.param = param 36 | super().__init__( 37 | f"Error Raised --code {code} --type {error_type} --message {message} --param {param}" 38 | ) 39 | 40 | 41 | class RateLimitError(OpenaiError): 42 | """Rate limit error""" 43 | 44 | pass 45 | 46 | 47 | class AuthenticationError(OpenaiError): 48 | """Authentication error""" 49 | 50 | pass 51 | 52 | 53 | class ServiceUnavailableError(OpenaiError): 54 | """Service unavailable error""" 55 | 56 | pass 57 | 58 | 59 | class UnexpectedError(OpenaiError): 60 | """Unexpected error""" 61 | 62 | pass 63 | 64 | 65 | def raise_error(status_code: int, error_data: dict): 66 | """ 67 | Raise Error 68 | {'error': {'message': 'Incorrect API key provided: 123123. You can find your API key at https://platform.openai.com/account/api-keys.', 'type': 'invalid_request_error', 'param': None, 'code': 'invalid_api_key'}} 69 | 70 | :param status_code: Status Code 71 | :param error_data: Error Data 72 | :return: 73 | """ 74 | if status_code == 429: 75 | raise RateLimitError( 76 | status_code=status_code, 77 | code=error_data.get("code", "429"), 78 | error_type=error_data.get("type", "RateLimitError"), 79 | message=error_data.get("message", "Rate Limit Error"), 80 | param=error_data.get("param", None), 81 | ) 82 | elif status_code == 404 and not error_data.get("message", None): 83 | raise ServiceUnavailableError( 84 | status_code=status_code, 85 | code=error_data.get("code", "404"), 86 | error_type=error_data.get("type", "ServiceUnavailableError"), 87 | message=error_data.get("message", "Service Unavailable Error"), 88 | param=error_data.get("param", None), 89 | ) 90 | elif status_code == 500 or status_code != 401: 91 | raise ServiceUnavailableError( 92 | status_code=status_code, 93 | code=error_data.get("code", "500"), 94 | error_type=error_data.get("type", "ServiceUnavailableError"), 95 | message=error_data.get("message", "Service Unavailable Error"), 96 | param=error_data.get("param", None), 97 | ) 98 | elif status_code == 200: 99 | raise UnexpectedError( 100 | status_code=status_code, 101 | code=error_data.get("code", "200"), 102 | error_type=error_data.get("type", "UnexpectedError"), 103 | message=error_data.get("message", "Unexpected Error"), 104 | param=error_data.get("param", None), 105 | ) 106 | else: 107 | raise AuthenticationError( 108 | status_code=status_code, 109 | code=error_data.get("code", "401"), 110 | error_type=error_data.get("type", "AuthenticationError"), 111 | message=error_data.get("message", "Authentication Error"), 112 | param=error_data.get("param", None), 113 | ) 114 | 115 | 116 | if __name__ == "__main__": 117 | raise OpenaiError(status_code=100, code="100", error_type="100") 118 | -------------------------------------------------------------------------------- /llmkira/openai/utils.py: -------------------------------------------------------------------------------- 1 | from io import BytesIO 2 | from typing import Literal 3 | 4 | from PIL import Image 5 | 6 | 7 | def resize_openai_image( 8 | image_bytes: bytes, mode: Literal["low", "high", "auto"] = "auto" 9 | ) -> bytes: 10 | """ 11 | 如果是 low,缩放到 512*512。如果是 high,图像的短边应小于 768 像素,长边应小于 2,000 像素,如果大于这个尺寸,按比例缩放到这个尺寸,长宽比不变。如果是 Auto,如果尺寸大于512但是小于 768,缩放到 512,如果长边大于2000或宽边大于768,按照比例缩放到合适的尺寸 12 | https://platform.openai.com/docs/guides/vision 13 | :param image_bytes: 图片的二进制数据 14 | :param mode: 模式 15 | :return: 处理后的图片二进制数据 16 | """ 17 | # 将 bytes 转换为图片对象 18 | image = Image.open(BytesIO(image_bytes)) 19 | # 获取图片的尺寸 20 | width, height = image.size 21 | # 限定尺寸的阈值 22 | limit_small = 512 23 | limit_short = 768 24 | limit_long = 2000 25 | # 决定是否需要改变图片尺寸的标记 26 | resize_flag = False 27 | new_size = width, height 28 | if mode == "low": 29 | if max(width, height) > limit_small: 30 | new_size = limit_small, limit_small 31 | resize_flag = True 32 | elif mode == "high": 33 | if min(width, height) > limit_short or max(width, height) > limit_long: 34 | new_size = min(limit_short, width), min(limit_long, height) 35 | resize_flag = True 36 | elif mode == "auto": 37 | if limit_small < max(width, height) < limit_short: 38 | new_size = limit_small, limit_small 39 | resize_flag = True 40 | elif min(width, height) > limit_short or max(width, height) > limit_long: 41 | new_size = min(limit_short, width), min(limit_long, height) 42 | resize_flag = True 43 | if resize_flag: 44 | image.thumbnail(new_size, Image.Resampling.BICUBIC) 45 | bytes_io = BytesIO() 46 | image.save(bytes_io, format="PNG") 47 | bytes_return = bytes_io.getvalue() 48 | return bytes_return 49 | -------------------------------------------------------------------------------- /llmkira/openapi/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/10/23 下午7:43 3 | # @Author : sudoskys 4 | # @File : __init__.py.py 5 | # @Software: PyCharm 6 | -------------------------------------------------------------------------------- /llmkira/openapi/fuse/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/10/23 下午7:44 3 | # @Author : sudoskys 4 | # @File : __init__.py.py 5 | # @Software: PyCharm 6 | 7 | #### 8 | # 此包包含错误计数器,用于统计错误次数,标记错误次数过多的插件。 9 | # 在构造阶段读取用户数据库,合并至 ignore 中。 10 | # 注意,注意回调的实现。 11 | #### 12 | from typing import Dict, Union, Type 13 | 14 | import wrapt 15 | from loguru import logger 16 | from pydantic import BaseModel 17 | 18 | from llmkira.openai.cell import Tool, class_tool 19 | 20 | __error_table__: Dict[str, int] = {} 21 | 22 | 23 | def get_error_plugin(error_times: int = 10) -> list: 24 | """ 25 | 获取错误次数过多的插件 26 | :param error_times: 错误次数 27 | :return: 28 | """ 29 | return [k for k, v in __error_table__.items() if v > error_times] 30 | 31 | 32 | def recover_error_plugin(function_name: str) -> None: 33 | """ 34 | 恢复错误插件 35 | :param function_name: 36 | :return: 37 | """ 38 | __error_table__[function_name] = 0 39 | 40 | 41 | def resign_plugin_executor( 42 | tool: Union[Tool, Type[BaseModel]], 43 | *, 44 | handle_exceptions: tuple = (Exception,), 45 | exclude_exceptions: tuple = (), 46 | ): 47 | """ 48 | 装饰器,先判断是否排除,再判断是否处理 49 | :param tool: 被装饰的函数 50 | :param handle_exceptions: 处理的异常,只有在此列表中的异常才会被计数 51 | :param exclude_exceptions: 排除的异常,不会被计数。不可以是宽泛的异常,如 Exception 52 | :return: 装饰器 53 | """ 54 | tool = class_tool(tool) 55 | if not handle_exceptions: 56 | handle_exceptions = (Exception,) 57 | if Exception in exclude_exceptions or BaseException in exclude_exceptions: 58 | raise ValueError("Exception and BaseException cant be exclude") 59 | logger.success(f"📦 [Plugin exception hook] {tool.function.name}") 60 | 61 | @wrapt.decorator # 保留被装饰函数的元信息 62 | def wrapper(wrapped, instance, args, kwargs): 63 | """ 64 | :param wrapped: 被装饰的函数 65 | :param instance: https://wrapt.readthedocs.io/en/latest/ 66 | :param args: 被装饰函数的参数 67 | :param kwargs: 被装饰函数的关键字参数 68 | :return: 69 | """ 70 | try: 71 | res = wrapped(*args, **kwargs) 72 | except Exception as e: 73 | if e in exclude_exceptions: 74 | logger.exception(e) 75 | return {} 76 | if e in handle_exceptions: 77 | __error_table__[tool.function.name] = ( 78 | __error_table__.get(tool.function.name, 0) + 1 79 | ) 80 | logger.exception(e) 81 | logger.warning( 82 | f"📦 [Plugin Not Handle Exception Hook] {tool.function.name} {e}" 83 | ) 84 | else: 85 | return res 86 | return {} 87 | 88 | return wrapper 89 | -------------------------------------------------------------------------------- /llmkira/openapi/hook/__init__.py: -------------------------------------------------------------------------------- 1 | # 用于在输出输入的时候,替换输入输出的数据,达成多媒体的目的。附加媒体文件查询,等等。 2 | from abc import abstractmethod 3 | from enum import Enum 4 | from typing import Type, Set, TypeVar 5 | 6 | from loguru import logger 7 | from pydantic import BaseModel 8 | 9 | T = TypeVar("T") 10 | 11 | 12 | class Trigger(Enum): 13 | SENDER = "sender" 14 | RECEIVER = "receiver" 15 | 16 | 17 | class Hook(BaseModel): 18 | trigger: Trigger = Trigger.RECEIVER 19 | priority: int = 0 20 | 21 | @abstractmethod 22 | async def trigger_hook(self, *args, **kwargs) -> bool: 23 | return True 24 | 25 | @abstractmethod 26 | async def hook_run(self, *args, **kwargs) -> T: 27 | pass 28 | 29 | 30 | __hook__: Set[Type[Hook]] = set() 31 | 32 | 33 | def resign_hook(): 34 | def decorator(cls: Type[Hook]): 35 | if issubclass(cls, Hook): 36 | logger.success(f"📦 [Resign Hook] {type(cls)}") 37 | __hook__.add(cls) 38 | else: 39 | raise ValueError(f"Resign Hook Error for unknown cls {type(cls)} ") 40 | 41 | return cls 42 | 43 | return decorator 44 | 45 | 46 | async def run_hook(trigger: Trigger, *args: T, **kwargs) -> T: 47 | hook_instances = [hook_cls() for hook_cls in __hook__] 48 | sorted_hook_instances = sorted( 49 | hook_instances, key=lambda x: x.priority, reverse=True 50 | ) 51 | for hook_instance in sorted_hook_instances: 52 | if hook_instance.trigger == trigger: 53 | if await hook_instance.trigger_hook(*args, **kwargs): 54 | try: 55 | args, kwargs = await hook_instance.hook_run(*args, **kwargs) 56 | except Exception as ex: 57 | logger.exception(f"Hook Run Error {ex}") 58 | return args, kwargs 59 | -------------------------------------------------------------------------------- /llmkira/openapi/trigger/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/10/26 下午4:01 3 | # @Author : sudoskys 4 | # @File : __init__.py.py 5 | # @Software: PyCharm 6 | 7 | 8 | ###### 9 | # 管道前置触发管理器,注册触发词或禁止触发词 10 | ###### 11 | 12 | 13 | import inspect 14 | from functools import wraps 15 | from typing import Literal, List, Callable 16 | from typing import TYPE_CHECKING 17 | 18 | from loguru import logger 19 | from pydantic import BaseModel, Field 20 | 21 | if TYPE_CHECKING: 22 | pass 23 | 24 | 25 | class Trigger(BaseModel): 26 | on_func: Callable = None 27 | on_platform: str 28 | action: Literal["allow", "deny"] = "allow" 29 | priority: int = Field(default=0, ge=-100, le=100) 30 | message: str = Field(default="Trigger deny your message") 31 | function_enable: bool = Field(default=False) 32 | 33 | def update_func(self, func: callable): 34 | self.on_func = func 35 | return self 36 | 37 | 38 | __trigger_phrases__: List[Trigger] = [] 39 | 40 | 41 | async def get_trigger_loop(platform_name: str, message: str, uid: str = None): 42 | """ 43 | receiver builder 44 | message: Message Content 45 | :return 如果有触发,则返回触发的action,否则返回None 代表没有操作 46 | """ 47 | trigger_sorted = sorted(__trigger_phrases__, key=lambda x: x.priority) 48 | if not message: 49 | message = "" 50 | for trigger in trigger_sorted: 51 | if trigger.on_platform == platform_name: 52 | try: 53 | if await trigger.on_func(message, uid): 54 | return trigger 55 | except Exception as e: 56 | logger.error(f"📦 Plugin:trigger error: {e}") 57 | pass 58 | return None 59 | 60 | 61 | def resign_trigger(trigger: Trigger): 62 | """ 63 | 装饰器 64 | """ 65 | 66 | def decorator(func): 67 | if inspect.iscoroutinefunction(func): 68 | __trigger_phrases__.append(trigger.update_func(func)) 69 | logger.success(f"📦 [Plugin trigger hook] {trigger.__repr__()}") 70 | else: 71 | raise ValueError( 72 | f"Resign Trigger Error for func {func} is not async function" 73 | ) 74 | 75 | @wraps(func) 76 | async def wrapper(*args, **kwargs): 77 | # 调用执行函数,中间人 78 | return func(*args, **kwargs) 79 | 80 | return wrapper 81 | 82 | return decorator 83 | -------------------------------------------------------------------------------- /llmkira/openapi/trigger/default_trigger.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/10/27 下午4:05 3 | # @Author : sudoskys 4 | # @File : default_trigger.py 5 | # @Software: PyCharm 6 | from . import resign_trigger, Trigger 7 | 8 | 9 | @resign_trigger(Trigger(on_platform="telegram", action="deny", priority=0)) 10 | async def on_chat_message(message: str, uid: str, **kwargs): 11 | """ 12 | :param message: RawMessage 13 | :return: 14 | """ 15 | if "" in message: 16 | return True 17 | -------------------------------------------------------------------------------- /llmkira/sdk/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/8/15 下午10:29 3 | # @Author : sudoskys 4 | # @File : __init__.py.py 5 | # @Software: PyCharm 6 | 7 | 8 | """ 9 | from .openapi.transducer import resign_transfer 10 | from .openapi.trigger import resign_trigger 11 | from .openapi.fuse import resign_plugin_executor, recover_error_plugin, get_error_plugin 12 | """ 13 | -------------------------------------------------------------------------------- /llmkira/sdk/tools/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 NoneBot Team 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /llmkira/sdk/tools/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 探针插件加载机制 参考 nonebot2 项目设计 3 | # https://github.com/nonebot/nonebot2/blob/99931f785a31138a2f6bac1d103551dab47d40f7/nonebot/plugin/manager.py 4 | 5 | from contextvars import ContextVar 6 | from itertools import chain 7 | from pathlib import Path 8 | from types import ModuleType 9 | from typing import Optional, List, Set, Dict, Tuple, Any 10 | 11 | from pydantic import BaseModel 12 | 13 | from .error import OpenApiError 14 | 15 | _plugins: Dict[str, "Plugin"] = {} 16 | _managers: List["PluginManager"] = [] 17 | _current_plugin_chain: ContextVar[Tuple["Plugin", ...]] = ContextVar( 18 | "_current_plugin_chain", default=tuple() 19 | ) 20 | 21 | 22 | class FrameworkInfo(BaseModel): 23 | support: bool 24 | exception: Optional[str] = None 25 | 26 | 27 | _current_openapi_version_: str = "20240416" 28 | _openapi_version_: Dict[str, "FrameworkInfo"] = { 29 | _current_openapi_version_: FrameworkInfo(support=True, exception=None), 30 | "20231111": FrameworkInfo( 31 | support=False, exception="Broken Changes in ver.20231111,pls refer docs" 32 | ), 33 | } 34 | 35 | 36 | def verify_openapi_version(name: str, openapi_version: str): 37 | """ 38 | 验证框架接口版本 39 | """ 40 | if not openapi_version: 41 | return None 42 | frame = _openapi_version_.get(openapi_version, None) 43 | if not frame: 44 | raise OpenApiError( 45 | f"OpenApiError:Plugin<{name}> --error {openapi_version} not support" 46 | ) 47 | if not frame.support: 48 | raise OpenApiError(f"OpenApiError:Plugin<{name}> --error {frame.exception}") 49 | 50 | 51 | def path_to_module_name(path: Path) -> str: 52 | rel_path = path.resolve().relative_to(Path("").resolve()) 53 | if rel_path.stem == "__init__": 54 | return ".".join(rel_path.parts[:-1]) 55 | else: 56 | return ".".join(rel_path.parts[:-1] + (rel_path.stem,)) 57 | 58 | 59 | def get_plugin(name: str) -> Optional["Plugin"]: 60 | """获取已经导入的某个插件。 61 | 62 | 如果为 `load_plugins` 文件夹导入的插件,则为文件(夹)名。 63 | 64 | 参数: 65 | name: 插件名,即 {ref}`extra.plugin.model.Plugin.name`。 66 | """ 67 | return _plugins.get(name) 68 | 69 | 70 | def get_loaded_plugins() -> Set["Plugin"]: 71 | """获取当前已导入的所有插件。""" 72 | return set(_plugins.values()) 73 | 74 | 75 | def get_entrypoint_plugins(group="llmkira.extra.plugin") -> Set[str]: 76 | import importlib_metadata 77 | 78 | hook = importlib_metadata.entry_points().select(group=group) 79 | plugins = [item.module for item in hook] 80 | return {*plugins} 81 | 82 | 83 | def get_available_plugin_names() -> Set[str]: 84 | """获取当前所有可用的插件名(包含尚未加载的插件)。""" 85 | return {*chain.from_iterable(manager.available_plugins for manager in _managers)} 86 | 87 | 88 | def _find_manager_by_name(name: str) -> Optional[Any]: 89 | for manager in reversed(_managers): 90 | if name in manager.plugins or name in manager.searched_plugins: 91 | return manager 92 | 93 | 94 | def _module_name_to_plugin_name(module_name: str) -> str: 95 | return module_name.rsplit(".", 1)[-1] 96 | 97 | 98 | def _new_plugin( 99 | module_name: str, module: ModuleType, manager: "PluginManager" 100 | ) -> "Plugin": 101 | """ 102 | Create a new plugin 103 | """ 104 | plugin_name = _module_name_to_plugin_name(module_name) 105 | if plugin_name in _plugins: 106 | raise RuntimeError("Plugin already exists! Check your plugin name.") 107 | plugin = Plugin(plugin_name, module, module_name, manager) 108 | _plugins[plugin_name] = plugin 109 | return plugin 110 | 111 | 112 | def _revert_plugin(plugin: "Plugin") -> None: 113 | """ 114 | Revert a plugin 115 | 删除创建的链 116 | :param plugin: Plugin to revert 117 | """ 118 | if plugin.name not in _plugins: 119 | raise RuntimeError("Plugin not found!") 120 | del _plugins[plugin.name] 121 | parent_plugin = plugin.parent_plugin 122 | if parent_plugin: 123 | parent_plugin.sub_plugins.remove(plugin) 124 | 125 | 126 | from .loader import PluginManager # noqa 402 127 | from .schema import Plugin as Plugin # noqa 402 128 | from .loader import require as require # noqa 402 129 | from .loader import load_plugin as load_plugin # noqa 402 130 | from .loader import load_plugins as load_plugins # noqa 402 131 | from .loader import load_all_plugins as load_all_plugins # noqa 402 132 | from .loader import load_builtin_plugin as load_builtin_plugin # noqa 402 133 | from .loader import load_builtin_plugins as load_builtin_plugins # noqa 402 134 | from .loader import load_from_entrypoint as load_from_entrypoint # noqa 402 135 | from .loader import PluginManager as PluginManager # noqa 402 136 | from .model import Plugin as Plugin # noqa 402 137 | from .model import PluginMetadata as PluginMetadata # noqa 402 138 | from .register import ToolRegister as ToolRegister # noqa 402 139 | -------------------------------------------------------------------------------- /llmkira/sdk/tools/error.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import re 4 | 5 | 6 | class OpenApiError(Exception): 7 | """插件错误基类。""" 8 | 9 | def __init__(self, message: str): 10 | super().__init__(message) 11 | 12 | 13 | def escape_tag(s: str) -> str: 14 | """用于记录带颜色日志时转义 `` 类型特殊标签 15 | 16 | 参考: [loguru color 标签](https://loguru.readthedocs.io/en/stable/api/logger.html#color) 17 | 18 | 参数: 19 | s: 需要转义的字符串 20 | """ 21 | return re.sub(r"\s]*)>", r"\\\g<0>", s) 22 | -------------------------------------------------------------------------------- /llmkira/sdk/tools/loader.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from pathlib import Path 4 | from types import ModuleType 5 | from typing import Optional, Set, Iterable, Union 6 | 7 | from . import ( 8 | _managers, 9 | _current_plugin_chain, 10 | _module_name_to_plugin_name, 11 | path_to_module_name, 12 | _find_manager_by_name, 13 | get_plugin, 14 | ) 15 | from .model import PluginManager 16 | from .schema import Plugin 17 | 18 | 19 | def load_all_plugins( 20 | module_path: Iterable[str], plugin_dir: Iterable[str] 21 | ) -> Set[Plugin]: 22 | """ 23 | 导入指定列表中的插件以及指定目录下多个插件,以 `_` 开头的插件不会被导入! 24 | :param module_path: 指定插件集合 25 | :param plugin_dir: 指定文件夹路径集合 26 | """ 27 | manager = PluginManager(module_path, plugin_dir) 28 | _managers.append(manager) 29 | return manager.load_all_plugins() 30 | 31 | 32 | def load_plugins(*plugin_dir: str) -> Set[Plugin]: 33 | """ 34 | 导入文件夹下多个插件,以 `_` 开头的插件不会被导入! 35 | :param plugin_dir: 文件夹路径 36 | :return: 插件集合 37 | """ 38 | manager = PluginManager(search_path=plugin_dir) 39 | _managers.append(manager) 40 | return manager.load_all_plugins() 41 | 42 | 43 | def load_builtin_plugin(name: str) -> Optional[Plugin]: 44 | """导入 Bot 内置插件。 45 | :param name: 插件名称 46 | :return: 插件 47 | """ 48 | return load_plugin(f"extra.plugins.{name}") 49 | 50 | 51 | def load_builtin_plugins(*plugins: str) -> Set[Plugin]: 52 | """导入多个 Bot 内置插件。 53 | :param plugins: 插件名称集合 54 | """ 55 | return load_all_plugins([f"extra.plugins.{p}" for p in plugins], []) 56 | 57 | 58 | def load_plugin(module_path: Union[str, Path]) -> Optional[Plugin]: 59 | """ 60 | 加载单个插件,可以是本地插件或是通过 `pip` 安装的插件。 61 | :param module_path: 插件名称 `path.to.your.plugin` 62 | 或插件路径 `pathlib.Path(path/to/your/plugin)` 63 | :return: 插件 64 | """ 65 | module_path = ( 66 | path_to_module_name(module_path) 67 | if isinstance(module_path, Path) 68 | else module_path 69 | ) 70 | manager = PluginManager([module_path]) 71 | _managers.append(manager) 72 | return manager.load_plugin(module_path) 73 | 74 | 75 | def load_from_entrypoint(group="llmkira.extra.plugin") -> Set[Plugin]: 76 | import importlib_metadata 77 | 78 | hook = importlib_metadata.entry_points().select(group=group) 79 | plugins = [item.module for item in hook] 80 | return load_all_plugins(plugins, []) 81 | 82 | 83 | def require(name: str) -> ModuleType: 84 | """ 85 | 获取一个插件的导出内容。 86 | 如果为 `load_plugins` 文件夹导入的插件,则为文件(夹)名。 87 | :param name: 插件名称 即 {ref}`extra.plugin.model.Plugin.name`。 88 | :exception RuntimeError: 插件无法加载 89 | :return: 插件导出内容 90 | """ 91 | plugin = get_plugin(_module_name_to_plugin_name(name)) 92 | # if plugin not loaded 93 | if not plugin: 94 | # plugin already declared 95 | manager = _find_manager_by_name(name) 96 | if manager: 97 | plugin = manager.load_plugin(name) 98 | # plugin not declared, try to declare and load it 99 | else: 100 | # clear current plugin chain, ensure plugin loaded in a new context 101 | _t = _current_plugin_chain.set(()) 102 | try: 103 | plugin = load_plugin(name) 104 | finally: 105 | _current_plugin_chain.reset(_t) 106 | if not plugin: 107 | raise RuntimeError(f'Cannot load plugin "{name}"!') 108 | return plugin.module 109 | -------------------------------------------------------------------------------- /llmkira/sdk/tools/register.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/8/16 下午8:54 3 | # @Author : sudoskys 4 | # @File : func_call.py 5 | # @Software: PyCharm 6 | import threading 7 | from typing import List, Dict 8 | from typing import Optional, Type 9 | from typing import TYPE_CHECKING 10 | 11 | from llmkira.kv_manager.file import File 12 | from llmkira.openai.cell import Tool 13 | from llmkira.sdk.tools import PluginMetadata 14 | from llmkira.sdk.tools import _openapi_version_, get_loaded_plugins 15 | from llmkira.sdk.tools.schema import FuncPair, BaseTool 16 | 17 | if TYPE_CHECKING: 18 | from ...task.schema import EventMessage 19 | 20 | threading_lock = threading.Lock() 21 | 22 | 23 | class ToolRegister(object): 24 | """ 25 | 扩展对 _plugins 字段的操作,需要实例化以获取数据 26 | """ 27 | 28 | def __init__(self): 29 | self.version = _openapi_version_ 30 | self.pair_function: Dict[str, FuncPair] = {} 31 | self.plugins = get_loaded_plugins() 32 | self.__prepare() 33 | 34 | def __prepare(self): 35 | # 遍历所有插件 36 | for item in self.plugins: 37 | for sub_item in item.metadata.function: 38 | self.pair_function[sub_item.name] = sub_item 39 | 40 | def get_tool(self, name: str) -> Optional[Type[BaseTool]]: 41 | if not self.pair_function.get(name, None): 42 | return None 43 | return self.pair_function[name].tool 44 | 45 | @property 46 | def get_plugins_meta(self) -> List[PluginMetadata]: 47 | return [item.metadata for item in get_loaded_plugins() if item.metadata] 48 | 49 | @property 50 | def tools(self) -> Dict[str, Tool]: 51 | """ 52 | Return the tools schema 53 | """ 54 | _item: Dict[str, Tool] = {} 55 | for item in self.plugins: 56 | for sub_item in item.metadata.function: 57 | _item[sub_item.name] = sub_item.function 58 | return _item 59 | 60 | @property 61 | def tools_runtime(self) -> List[Type[BaseTool]]: 62 | """ 63 | Return the tools runtime 64 | """ 65 | _item: List[Type[BaseTool]] = [] 66 | for item in self.plugins: 67 | for sub_item in item.metadata.function: 68 | _item.append(sub_item.tool) 69 | return _item 70 | 71 | def filter_pair( 72 | self, 73 | key_phrases: str, 74 | message_raw: "EventMessage" = None, 75 | file_list: List[File] = None, 76 | address: tuple = None, 77 | ignore: List[str] = None, 78 | ) -> List[Tool]: 79 | """ 80 | 过滤group中的函数 81 | """ 82 | if ignore is None: 83 | ignore = [] 84 | if file_list is None: 85 | file_list = [] 86 | function_list = [] 87 | 88 | def _match_file(_tool, _file_list): 89 | for file in _file_list: 90 | if _tool.file_match_required.match(file.file_name): 91 | return True 92 | return False 93 | 94 | for func_name, pair_cls in self.pair_function.items(): 95 | _tool_cls = pair_cls.tool() 96 | if _tool_cls.func_message( 97 | message_text=key_phrases, message_raw=message_raw, address=address 98 | ): 99 | # 关键词大类匹配成功 100 | if func_name in ignore: 101 | continue # 忽略函数 102 | if _tool_cls.file_match_required: 103 | if not _match_file(_tool_cls, file_list): 104 | continue 105 | function_list.append(pair_cls.function) 106 | return function_list 107 | -------------------------------------------------------------------------------- /llmkira/sdk/utils.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | """ 4 | bilibili_api.utils.sync 5 | 6 | 同步执行异步函数 7 | """ 8 | import asyncio 9 | import hashlib 10 | import tempfile 11 | from bisect import bisect_left 12 | from typing import Coroutine, Optional 13 | from urllib.parse import urlparse 14 | 15 | import aiohttp 16 | import ffmpeg 17 | import shortuuid 18 | from loguru import logger 19 | 20 | 21 | # import nest_asyncio 22 | # nest_asyncio.apply() 23 | 24 | 25 | def is_valid_url(url): 26 | try: 27 | result = urlparse(url) 28 | return all([result.scheme, result.netloc]) 29 | except ValueError: 30 | return False 31 | 32 | 33 | def __ensure_event_loop(): 34 | try: 35 | asyncio.get_event_loop() 36 | except Exception as e: # noqa 37 | asyncio.set_event_loop(asyncio.new_event_loop()) 38 | 39 | 40 | def sync(coroutine: Coroutine): 41 | """ 42 | 同步执行异步函数,使用可参考 [同步执行异步代码](https://nemo2011.github.io/bilibili-api/#/sync-executor) 43 | 44 | Args: 45 | coroutine (Coroutine): 异步函数 46 | 47 | Returns: 48 | 该异步函数的返回值 49 | """ 50 | __ensure_event_loop() 51 | loop = asyncio.get_event_loop() 52 | return loop.run_until_complete(coroutine) 53 | 54 | 55 | def sha1_encrypt(string): 56 | """ 57 | sha1加密算法 58 | """ 59 | 60 | sha = hashlib.sha1(string.encode("utf-8")) 61 | encrypts = sha.hexdigest() 62 | return encrypts[:8] 63 | 64 | 65 | def generate_uid(): 66 | return shortuuid.uuid()[0:8].upper() 67 | 68 | 69 | async def aiohttp_download_file( 70 | url, 71 | session: aiohttp.ClientSession = None, 72 | timeout=None, 73 | size_limit=None, 74 | headers=None, 75 | **kwargs, 76 | ): 77 | if not session: 78 | session = aiohttp.ClientSession() 79 | async with session as session: 80 | async with session.get( 81 | url, timeout=timeout, headers=headers, **kwargs 82 | ) as response: 83 | if response.status != 200: 84 | raise Exception("无法下载文件") 85 | 86 | content_length = response.content_length 87 | if size_limit and content_length and content_length > size_limit: 88 | raise Exception("文件大小超过限制") 89 | 90 | contents = await response.read() 91 | return contents 92 | 93 | 94 | class Ffmpeg(object): 95 | @staticmethod 96 | def convert( 97 | *, 98 | input_c: str = "mp3", 99 | output_c: str = "ogg", 100 | stream_data: bytes = None, 101 | quiet=False, 102 | ) -> Optional[bytes]: 103 | """ 104 | 使用ffmpeg转换音频格式 105 | :param input_c: 输入音频格式 106 | :param output_c: 输出音频格式 107 | :param stream_data: 输入音频流 108 | :param quiet: 是否静默 109 | """ 110 | if not input_c.startswith("."): 111 | input_c = "." + input_c 112 | if not output_c.startswith("."): 113 | output_c = "." + output_c 114 | in_fd, temp_filename = tempfile.mkstemp( 115 | suffix=input_c, prefix=None, dir=None, text=False 116 | ) 117 | out_fd, out_temp_filename = tempfile.mkstemp( 118 | suffix=output_c, prefix=None, dir=None, text=False 119 | ) 120 | _bytes = None 121 | try: 122 | # 写入文件 123 | with open(temp_filename, "wb") as f: 124 | f.write(stream_data) 125 | stream = ffmpeg.input(filename=temp_filename) 126 | if output_c == ".ogg": 127 | stream = ffmpeg.output( 128 | stream, filename=out_temp_filename, acodec="libopus" 129 | ) 130 | else: 131 | stream = ffmpeg.output(stream, filename=out_temp_filename) 132 | stream = ffmpeg.overwrite_output(stream) 133 | _ = ffmpeg.run(stream_spec=stream, quiet=quiet) 134 | # 读取文件 135 | import os 136 | 137 | _bytes = os.read(out_fd, os.path.getsize(out_temp_filename)) 138 | assert _bytes, "ffmpeg convert failed" 139 | except Exception as e: 140 | logger.error(f"ffmpeg convert failed {e}") 141 | raise e 142 | finally: 143 | import os 144 | 145 | os.close(in_fd) 146 | os.close(out_fd) 147 | os.remove(out_temp_filename) 148 | os.remove(temp_filename) 149 | return _bytes 150 | 151 | 152 | def prefix_search(wordlist, prefix): 153 | """ 154 | 在有序列表中二分查找前缀 155 | :param wordlist: 有序列表 156 | :param prefix: 前缀 157 | """ 158 | try: 159 | index = bisect_left(wordlist, prefix) 160 | return wordlist[index].startswith(prefix) 161 | except IndexError: 162 | return False 163 | -------------------------------------------------------------------------------- /llmkira/task/snapshot/__init__.py: -------------------------------------------------------------------------------- 1 | # 消息池 2 | from ._base import SnapData # noqa 3 | from .local import FileSnapshotStorage 4 | 5 | global_snapshot_storage: FileSnapshotStorage = FileSnapshotStorage() 6 | 7 | __all__ = [ 8 | "SnapData", 9 | "global_snapshot_storage", 10 | ] 11 | -------------------------------------------------------------------------------- /llmkira/task/snapshot/_base.py: -------------------------------------------------------------------------------- 1 | from abc import abstractmethod 2 | from typing import Optional, List 3 | 4 | from pydantic import BaseModel 5 | 6 | from llmkira.task.schema import Snapshot 7 | 8 | 9 | class SnapData(BaseModel): 10 | data: List[Snapshot] 11 | 12 | 13 | class BaseSnapshotStorage(object): 14 | @abstractmethod 15 | async def read(self, user_id: str) -> Optional["SnapData"]: 16 | ... 17 | 18 | @abstractmethod 19 | async def write(self, user_id: str, snapshot: "SnapData"): 20 | ... 21 | -------------------------------------------------------------------------------- /llmkira/task/snapshot/local.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | from typing import Optional 3 | 4 | from json_repair import repair_json 5 | from loguru import logger 6 | 7 | from llmkira.task.snapshot._base import BaseSnapshotStorage, SnapData 8 | 9 | _FILE_STORAGE = Path().cwd().joinpath(".snapshot") 10 | if not _FILE_STORAGE.exists(): 11 | _FILE_STORAGE.mkdir() 12 | 13 | 14 | def _make_json_file(location: str): 15 | if not location.endswith(".json"): 16 | location = f"{location}.json" 17 | return _FILE_STORAGE.joinpath(location) 18 | 19 | 20 | class FileSnapshotStorage(BaseSnapshotStorage): 21 | # TODO:删除过期的快照防止数据过多 22 | async def read(self, user_id: str) -> Optional[SnapData]: 23 | location_file = _make_json_file(user_id) 24 | if not location_file.exists(): 25 | return None 26 | data = repair_json( 27 | location_file.read_text(encoding="utf-8"), return_objects=True 28 | ) 29 | if not data: 30 | return None 31 | try: 32 | return SnapData.model_validate(data) 33 | except Exception as e: 34 | logger.debug(e) 35 | return None 36 | 37 | async def write(self, user_id: str, snapshot: SnapData): 38 | location_file = _make_json_file(user_id) 39 | location_file.write_text(snapshot.model_dump_json(indent=2)) 40 | return snapshot 41 | -------------------------------------------------------------------------------- /playground/hooks.py: -------------------------------------------------------------------------------- 1 | from llmkira.openapi.hook import resign_hook, Hook, Trigger, run_hook 2 | 3 | 4 | @resign_hook() 5 | class TestHook(Hook): 6 | trigger: Trigger = Trigger.SENDER 7 | 8 | async def trigger_hook(self, *args, **kwargs) -> bool: 9 | print(f"Trigger {args} {kwargs}") 10 | return True 11 | 12 | async def hook_run(self, *args, **kwargs): 13 | print(f"Running {args} {kwargs}") 14 | return args, kwargs 15 | 16 | 17 | @resign_hook() 18 | class TestHook2(Hook): 19 | trigger: Trigger = Trigger.SENDER 20 | priority: int = 1 21 | 22 | async def trigger_hook(self, *args, **kwargs) -> bool: 23 | print(f"Trigger {args} {kwargs}") 24 | return True 25 | 26 | async def hook_run(self, *args, **kwargs): 27 | print(f"Running {args} {kwargs}") 28 | return args, kwargs 29 | 30 | 31 | async def run_test(): 32 | print("Before running hook") 33 | arg, kwarg = await run_hook(Trigger.SENDER, 2, 3, a=4, b=5) 34 | print(f"After running hook {arg} {kwarg}") 35 | 36 | 37 | import asyncio # noqa 38 | 39 | asyncio.run(run_test()) 40 | -------------------------------------------------------------------------------- /playground/jsonf.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | 3 | from loguru import logger 4 | 5 | from llmkira.kv_manager.tool_call import GLOBAL_TOOLCALL_CACHE_HANDLER 6 | from llmkira.openai.cell import Message, AssistantMessage, ToolMessage 7 | 8 | 9 | async def validate_mock2(messages: List[Message]): 10 | """ 11 | 所有的具有 tool_calls 的 AssistantMessage 后面必须有对应的 ToolMessage 响应。其他消息类型按照原顺序 12 | """ 13 | paired_messages = [] 14 | i = 0 15 | while i < len(messages): 16 | msg = messages[i] 17 | if isinstance(msg, AssistantMessage) and msg.tool_calls: 18 | # 保证后续存在元素且是 ToolMessage 实例 19 | if i + 1 < len(messages) and isinstance(messages[i + 1], ToolMessage): 20 | tool_message: ToolMessage = messages[i + 1] 21 | assistant_message = AssistantMessage( 22 | content=msg.content, 23 | tool_calls=[ 24 | tool_call 25 | for tool_call in msg.tool_calls 26 | if tool_call.id == tool_message.tool_call_id 27 | ], 28 | ) 29 | if ( 30 | assistant_message.tool_calls 31 | ): # 只有当有匹配的 tool_calls 时,才添加 AssistantMessage 32 | paired_messages.append(assistant_message) 33 | paired_messages.append(tool_message) 34 | i += 1 # ToolMessage已处理,所以移动一步 35 | else: 36 | # 尝试通过GLOBAL_TOOLCALL_CACHE_HANDLER获得 tool_call 37 | tool_call_origin = await GLOBAL_TOOLCALL_CACHE_HANDLER.get_toolcall( 38 | tool_message.tool_call_id 39 | ) 40 | if tool_call_origin: 41 | assistant_message = AssistantMessage( 42 | content=None, tool_calls=[tool_call_origin] 43 | ) 44 | paired_messages.append(assistant_message) 45 | paired_messages.append(tool_message) 46 | i += 1 # ToolMessage已处理,所以移动一步 47 | else: 48 | logger.error( 49 | f"llm_task:ToolCall not found {tool_message.tool_call_id}, skip" 50 | ) 51 | else: 52 | paired_messages.append(msg) 53 | i += 1 54 | if len(paired_messages) != len(messages): 55 | logger.debug(f"llm_task:validate_mock cache:{paired_messages}") 56 | return paired_messages 57 | -------------------------------------------------------------------------------- /playground/token.py: -------------------------------------------------------------------------------- 1 | from urllib.parse import urlparse 2 | 3 | 4 | def split_dollar_string(input_string): 5 | segments = input_string.split("$") 6 | 7 | # 检查链接的有效性 8 | def is_valid_url(url): 9 | try: 10 | result = urlparse(url) 11 | return all([result.scheme, result.netloc]) 12 | except ValueError: 13 | return False 14 | 15 | # 开头为链接的情况 16 | if is_valid_url(segments[0]) and len(segments) >= 3: 17 | return segments[:3] 18 | # 第二个元素为链接,第一个元素为字符串的情况 19 | elif ( 20 | len(segments) == 2 21 | and not is_valid_url(segments[0]) 22 | and is_valid_url(segments[1]) 23 | ): 24 | return segments 25 | # 其他情况 26 | else: 27 | return None 28 | 29 | 30 | # 测试函数 31 | print( 32 | split_dollar_string("api.openai.com$apikey$model_name") 33 | ) # 输出:['api.openai.com', 'apikey', 'model_name'] 34 | print( 35 | split_dollar_string("token$https://provider_url") 36 | ) # 输出:['token', 'provider_url'] 37 | print(split_dollar_string("string$invalid_url")) # 输出:None 38 | -------------------------------------------------------------------------------- /pm2.json: -------------------------------------------------------------------------------- 1 | { 2 | "apps": [ 3 | { 4 | "name": "llm_sender", 5 | "script": "sleep 3&&pdm run python start_sender.py --no_tutorial", 6 | "instances": 1, 7 | "max_restarts": 3, 8 | "restart_delay": 10000, 9 | "exp_backoff_restart_delay": 100, 10 | "error_file": "/dev/null", 11 | "out_file": "/dev/null", 12 | "log_date_format": "YYYY-MM-DD HH-mm-ss" 13 | }, 14 | { 15 | "name": "llm_receiver", 16 | "script": "sleep 3&&pdm run python start_receiver.py --no_tutorial", 17 | "instances": 1, 18 | "max_restarts": 3, 19 | "restart_delay": 10000, 20 | "exp_backoff_restart_delay": 100, 21 | "error_file": "/dev/null", 22 | "out_file": "/dev/null", 23 | "log_date_format": "YYYY-MM-DD HH-mm-ss" 24 | } 25 | ] 26 | } 27 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "llmkira" 3 | version = "1.0.5" 4 | description = "A chain message bot based on OpenAI" 5 | authors = [ 6 | { name = "sudoskys", email = "me@dianas.cyou" }, 7 | { name = "llmkira", email = "me@dianas.cyou" }, 8 | ] 9 | dependencies = [ 10 | "pathlib>=1.0.1", 11 | "pydantic>=2.0.0", 12 | "loguru>=0.5.3", 13 | "httpx>=0.24.1", 14 | "socksio<2.0.0,>=1.0.0", 15 | "python-dotenv<2.0.0,>=1.0.0", 16 | "redis>=4.5.4", 17 | "aio-pika<10.0.0,>=9.3.0", 18 | "arclet-alconna<2.0.0,>=1.7.26", 19 | "shortuuid<2.0.0,>=1.0.11", 20 | "contextvars<3.0,>=2.4", 21 | "pytz<2024.0.0,>=2023.3.post1", 22 | "tenacity<9.0.0,>=8.2.3", 23 | "pysocks<2.0.0,>=1.7.1", 24 | "flask-sqlalchemy<4.0.0,>=3.1.1", 25 | "emoji<3.0.0,>=2.8.0", 26 | "websocket<1.0.0,>=0.2.1", 27 | "wrapt<2.0.0,>=1.11.0", 28 | "dynaconf<4.0.0,>=3.2.3", 29 | "rich<14.0.0,>=13.6.0", 30 | "importlib-metadata<7.0.0,>=6.8.0", 31 | "sentry-sdk<2.0.0,>=1.34.0", 32 | "boltons<24.0.0,>=23.1.1", 33 | "orjson<4.0.0,>=3.9.10", 34 | "pydantic-settings<3.0.0,>=2.0.3", 35 | "docstring-parser<1.0,>=0.15", 36 | "polling<1.0.0,>=0.3.2", 37 | "elara<1.0.0,>=0.5.5", 38 | "tzlocal<6.0,>=5.2", 39 | "requests[socks]<3.0.0,>=2.31.0", 40 | "pillow<11.0.0,>=10.1.0", 41 | "inscriptis<3.0.0,>=2.3.2", 42 | "aiohttp<4.0.0,>=3.8.6", 43 | "pytelegrambotapi<5.0.0,>=4.14.0", 44 | "ffmpeg-python<1.0.0,>=0.2.0", 45 | "duckduckgo-search>=6.2.0", 46 | "telegramify-markdown>=0.3.0", 47 | "json-repair>=0.13.0", 48 | "curl-cffi>=0.6.2", 49 | "deprecated>=1.2.14", 50 | "aiofile>=3.8.8", 51 | "file-read-backwards>=3.0.0", 52 | "apscheduler>=3.10.4", 53 | "montydb[lmdb]>=2.5.2", 54 | "pymongo>=4.6.3", 55 | "fast-langdetect>=0.2.1", 56 | "lmdb>=1.4.1", 57 | "e2b>=0.14.14", 58 | "e2b-code-interpreter>=0.0.3", 59 | "gTTS>=2.5.1", 60 | "matplotlib>=3.9.2", 61 | ] 62 | requires-python = "<3.12,>=3.9" 63 | readme = "README.md" 64 | license = { text = "Apache-2.0" } 65 | keywords = ["llmbot", "llmkira", "openai", "chatgpt", "llm"] 66 | classifiers = ["Programming Language :: Python :: 3", "Programming Language :: Python :: 3.9"] 67 | 68 | [project.urls] 69 | homepage = "https://llmkira.github.io/Docs/" 70 | repository = "https://github.com/LlmKira/Openaibot" 71 | 72 | 73 | [project.optional-dependencies] 74 | bot = [ 75 | "hikari>=2.0.0.dev121", 76 | # FIXME code not compatible with hikari 2.0.0.dev121 77 | "hikari-crescent>=0.6.5", 78 | "khl-py<1.0.0,>=0.3.17", 79 | "slack-bolt<2.0.0,>=1.18.0" 80 | ] 81 | testing = [ 82 | "hikari==2.0.0.dev121", 83 | # FIXME code not compatible with hikari 2.0.0.dev121 84 | "hikari-crescent<1.0.0,>=0.6.4", 85 | "khl-py<1.0.0,>=0.3.17", 86 | "slack-bolt<2.0.0,>=1.18.0", 87 | "pre-commit<3.5.0,>=2.15.0", 88 | ] 89 | 90 | [tool.pdm.dev-dependencies] 91 | dev = [ 92 | "pytest<7.0.0,>=6.2.5", 93 | ] 94 | 95 | [tool.pdm.build] 96 | includes = ["llmkira"] 97 | 98 | [build-system] 99 | requires = ["pdm-backend"] 100 | build-backend = "pdm.backend" 101 | -------------------------------------------------------------------------------- /start.sh: -------------------------------------------------------------------------------- 1 | pm2 start pm2.json --no-daemon -------------------------------------------------------------------------------- /start_receiver.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/10/18 下午10:23 3 | import getopt 4 | import os 5 | import sys 6 | 7 | from dotenv import load_dotenv 8 | from loguru import logger 9 | 10 | load_dotenv() 11 | logger.remove(0) 12 | handler_id = logger.add( 13 | sys.stderr, 14 | format="[{level}] | {message} | " 15 | "{name}:{function}:{line} @{time}", 16 | colorize=True, 17 | backtrace=True, 18 | enqueue=True, 19 | level="DEBUG" if os.getenv("DEBUG", None) else "INFO", 20 | ) 21 | logger.add( 22 | sink="receiver.log", 23 | format="[{level}] | {message} | " 24 | "{name}:{function}:{line} @{time}", 25 | level="DEBUG", 26 | rotation="100 MB", 27 | enqueue=True, 28 | ) 29 | head = """ 30 | ██╗ ██╗ ███╗ ███╗██╗ ██╗██╗██████╗ █████╗ 31 | ██║ ██║ ████╗ ████║██║ ██╔╝██║██╔══██╗██╔══██╗ 32 | ██║ ██║ ██╔████╔██║█████╔╝ ██║██████╔╝███████║ 33 | ██║ ██║ ██║╚██╔╝██║██╔═██╗ ██║██╔══██╗██╔══██║ 34 | ███████╗███████╗██║ ╚═╝ ██║██║ ██╗██║██║ ██║██║ ██║ 35 | ╚══════╝╚══════╝╚═╝ ╚═╝╚═╝ ╚═╝╚═╝╚═╝ ╚═╝╚═╝ ╚═╝ 36 | """ 37 | logger.opt(record=False, exception=False, capture=False, colors=True).info( 38 | f"{head}" 39 | ) 40 | if os.getenv("DEBUG", None): 41 | logger.warning("DEBUG MODE IS OPEN") 42 | # Log System 43 | if os.getenv("SENTRY_DSN", None): 44 | try: 45 | import sentry_sdk 46 | 47 | sentry_sdk.init( 48 | dsn=os.getenv("SENTRY_DSN"), 49 | traces_sample_rate=1.0, 50 | profiles_sample_rate=1.0, 51 | ) 52 | except Exception as e: 53 | logger.error(f"SENTRY ERROR: {e}") 54 | else: 55 | logger.success("🌟 Create Sentry Client Successfully!") 56 | 57 | # Tutorial 58 | SKIP_TUTORIAL = False 59 | SKIP_EXISTING = True 60 | opts, args = getopt.getopt(sys.argv[1:], "h", ["no_tutorial", "tutorial"]) 61 | for op, value in opts: 62 | if op == "--no_tutorial": # 获取命令行参数的 --no_tutorial 63 | SKIP_TUTORIAL = True 64 | if op == "-h": 65 | print("Usage: python start_receiver.py [--no_tutorial] [--tutorial]") 66 | sys.exit() 67 | if op == "--tutorial": 68 | SKIP_EXISTING = False 69 | if not SKIP_TUTORIAL: 70 | from app.tutorial import show_tutorial 71 | 72 | show_tutorial(skip_existing=SKIP_EXISTING, pre_step_stop=4, database_key="01") 73 | 74 | # Run Receiver 75 | from app.receiver import app # noqa 76 | 77 | app.run() 78 | -------------------------------------------------------------------------------- /start_sender.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 本文件是启动文件,用于启动接收端和教程 3 | import getopt 4 | import os 5 | import sys 6 | 7 | from dotenv import load_dotenv 8 | from loguru import logger 9 | 10 | load_dotenv() 11 | logger.remove(0) 12 | handler_id = logger.add( 13 | sys.stderr, 14 | format="[{level}] | {message} | " 15 | "{name}:{function}:{line} @{time}", 16 | colorize=True, 17 | backtrace=True, 18 | enqueue=True, 19 | level="DEBUG" if os.getenv("DEBUG", None) else "INFO", 20 | ) 21 | logger.add( 22 | sink="sender.log", 23 | format="[{level}] | {message} | " 24 | "{name}:{function}:{line} @{time}", 25 | level="DEBUG", 26 | rotation="100 MB", 27 | enqueue=True, 28 | ) 29 | head = """ 30 | ██╗ ██╗ ███╗ ███╗██╗ ██╗██╗██████╗ █████╗ 31 | ██║ ██║ ████╗ ████║██║ ██╔╝██║██╔══██╗██╔══██╗ 32 | ██║ ██║ ██╔████╔██║█████╔╝ ██║██████╔╝███████║ 33 | ██║ ██║ ██║╚██╔╝██║██╔═██╗ ██║██╔══██╗██╔══██║ 34 | ███████╗███████╗██║ ╚═╝ ██║██║ ██╗██║██║ ██║██║ ██║ 35 | ╚══════╝╚══════╝╚═╝ ╚═╝╚═╝ ╚═╝╚═╝╚═╝ ╚═╝╚═╝ ╚═╝ 36 | """ 37 | logger.opt(record=False, exception=False, capture=False, colors=True).info( 38 | f"{head}" 39 | ) 40 | if os.getenv("DEBUG", None): 41 | logger.warning("DEBUG MODE IS OPEN") 42 | # Log System 43 | if os.getenv("SENTRY_DSN", None): 44 | try: 45 | import sentry_sdk 46 | 47 | sentry_sdk.init( 48 | dsn=os.getenv("SENTRY_DSN"), 49 | traces_sample_rate=1.0, 50 | profiles_sample_rate=1.0, 51 | enable_tracing=True, 52 | ) 53 | except Exception as e: 54 | logger.error(f"SENTRY ERROR: {e}") 55 | else: 56 | logger.success("🌟 Create Sentry Client Successfully!") 57 | 58 | # Tutorial 59 | SKIP_TUTORIAL = False 60 | SKIP_EXISTING = True 61 | opts, args = getopt.getopt(sys.argv[1:], "h", ["no_tutorial", "tutorial"]) 62 | for op, value in opts: 63 | if op == "--no_tutorial": # 获取命令行参数的 --no_tutorial 64 | SKIP_TUTORIAL = True 65 | if op == "-h": 66 | print("Usage: python start_receiver.py [--no_tutorial] [--tutorial]") 67 | sys.exit() 68 | if op == "--tutorial": 69 | SKIP_EXISTING = False 70 | if not SKIP_TUTORIAL: 71 | from app.tutorial import show_tutorial 72 | 73 | show_tutorial(skip_existing=SKIP_EXISTING, pre_step_stop=4, database_key="01") 74 | 75 | # Run 76 | from app.sender import app # noqa 77 | 78 | app.run() 79 | -------------------------------------------------------------------------------- /start_tutorial.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2023/10/29 上午10:15 3 | # @Author : sudoskys 4 | # @File : start_tutorial.py 5 | # @Software: PyCharm 6 | import os 7 | import sys 8 | 9 | try: 10 | import loguru # noqa 11 | import rich # noqa 12 | except ImportError: 13 | print("Please run `poetry install --all-extras`") 14 | sys.exit(1) 15 | from loguru import logger 16 | from app.tutorial import show_tutorial 17 | 18 | logger.remove() 19 | logger.add( 20 | sys.stderr, 21 | level="INFO" if os.getenv("LLMBOT_LOG_OUTPUT") != "DEBUG" else "DEBUG", 22 | colorize=True, 23 | enqueue=True, 24 | ) 25 | 26 | logger.add( 27 | sink="run.log", 28 | format="{time} - {level} - {message}", 29 | level="INFO", 30 | rotation="100 MB", 31 | enqueue=True, 32 | ) 33 | head = """ 34 | ██╗ ██╗ ███╗ ███╗██╗ ██╗██╗██████╗ █████╗ 35 | ██║ ██║ ████╗ ████║██║ ██╔╝██║██╔══██╗██╔══██╗ 36 | ██║ ██║ ██╔████╔██║█████╔╝ ██║██████╔╝███████║ 37 | ██║ ██║ ██║╚██╔╝██║██╔═██╗ ██║██╔══██╗██╔══██║ 38 | ███████╗███████╗██║ ╚═╝ ██║██║ ██╗██║██║ ██║██║ ██║ 39 | ╚══════╝╚══════╝╚═╝ ╚═╝╚═╝ ╚═╝╚═╝╚═╝ ╚═╝╚═╝ ╚═╝ 40 | """ 41 | logger.opt(record=False, exception=False, capture=False, colors=True).info( 42 | f"{head}" 43 | ) 44 | 45 | show_tutorial(skip_existing=False, pre_step_stop=5, database_key="start_tutorial") 46 | -------------------------------------------------------------------------------- /tests/pydantic_error.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # @Time : 2024/1/17 上午11:52 3 | # @Author : sudoskys 4 | # @File : pydantic_error.py 5 | # @Software: PyCharm 6 | from pprint import pprint 7 | from typing import Optional, Literal 8 | 9 | from pydantic import BaseModel 10 | 11 | 12 | class ContentParts(BaseModel): 13 | """ 14 | 请求体 15 | """ 16 | 17 | class Image(BaseModel): 18 | url: str 19 | detail: Optional[Literal["low", "high", "auto"]] = "auto" 20 | 21 | type: str 22 | text: Optional[str] 23 | image_url: Optional[Image] 24 | 25 | 26 | try: 27 | ContentParts(type="text", text="testing") 28 | except Exception as e: 29 | pprint(e) 30 | """ 31 | 1 validation error for ContentParts 32 | image_url 33 | Field required [type=missing, input_value={'type': 'text', 'text': 'testing'}, input_type=dict] 34 | For further information visit https://errors.pydantic.dev/2.5/v/missing 35 | """ 36 | 37 | try: 38 | ContentParts( 39 | type="image", image_url=ContentParts.Image(url="https://www.baidu.com") 40 | ) 41 | except Exception as e: 42 | pprint(e) 43 | """ 44 | 1 validation error for ContentParts 45 | text 46 | Field required [type=missing, input_value={'type': 'image', 'image_...du.com', detail='auto')}, input_type=dict] 47 | For further information visit https://errors.pydantic.dev/2.5/v/missing 48 | 49 | """ 50 | 51 | 52 | class ContentPartsAnother(BaseModel): 53 | """ 54 | 请求体 55 | """ 56 | 57 | text: str 58 | image: Optional[bool] = None 59 | 60 | 61 | try: 62 | pprint(ContentPartsAnother(text="testing").model_dump_json()) 63 | except Exception as e: 64 | pprint(e) 65 | """ 66 | '{"text":"testing","image":null}' 67 | """ 68 | --------------------------------------------------------------------------------