├── .gitattributes
├── .github
    ├── ISSUE_TEMPLATE
    │   ├── bug_report.yml
    │   └── feature_request.yml
    └── workflows
    │   ├── build-with-all-capacity.yml
    │   ├── build-with-audio-assistant.yml
    │   ├── build-with-chatglm.yml
    │   ├── build-with-latex-arm.yml
    │   ├── build-with-latex.yml
    │   ├── build-without-local-llms.yml
    │   ├── conda-pack-windows.yml
    │   └── stale.yml
├── .gitignore
├── Dockerfile
├── LICENSE
├── README.md
├── check_proxy.py
├── config.py
├── core_functional.py
├── crazy_functional.py
├── crazy_functions
    ├── Conversation_To_File.py
    ├── Image_Generate.py
    ├── Image_Generate_Wrap.py
    ├── Internet_GPT.py
    ├── Internet_GPT_Wrap.py
    ├── Latex_Function.py
    ├── Latex_Function_Wrap.py
    ├── Latex_Project_Polish.py
    ├── Latex_Project_Translate_Legacy.py
    ├── Markdown_Translate.py
    ├── PDF_Translate.py
    ├── PDF_Translate_Wrap.py
    ├── Rag_Interface.py
    ├── Social_Helper.py
    ├── SourceCode_Analyse.py
    ├── SourceCode_Comment.py
    ├── SourceCode_Comment_Wrap.py
    ├── VideoResource_GPT.py
    ├── __init__.py
    ├── agent_fns
    │   ├── auto_agent.py
    │   ├── echo_agent.py
    │   ├── general.py
    │   ├── persistent.py
    │   ├── pipe.py
    │   ├── python_comment_agent.py
    │   ├── python_comment_compare.html
    │   └── watchdog.py
    ├── ast_fns
    │   └── comment_remove.py
    ├── crazy_utils.py
    ├── diagram_fns
    │   └── file_tree.py
    ├── doc_fns
    │   ├── AI_review_doc.py
    │   ├── __init__.py
    │   ├── batch_file_query_doc.py
    │   ├── content_folder.py
    │   ├── conversation_doc
    │   │   ├── excel_doc.py
    │   │   ├── html_doc.py
    │   │   ├── markdown_doc.py
    │   │   ├── pdf_doc.py
    │   │   ├── txt_doc.py
    │   │   ├── word2pdf.py
    │   │   └── word_doc.py
    │   └── read_fns
    │   │   ├── __init__.py
    │   │   ├── docx_reader.py
    │   │   ├── excel_reader.py
    │   │   ├── markitdown
    │   │       └── markdown_reader.py
    │   │   ├── unstructured_all
    │   │       ├── __init__.py
    │   │       ├── paper_metadata_extractor.py
    │   │       ├── paper_structure_extractor.py
    │   │       ├── unstructured_md.py
    │   │       └── unstructured_reader.py
    │   │   └── web_reader.py
    ├── game_fns
    │   ├── game_ascii_art.py
    │   ├── game_interactive_story.py
    │   └── game_utils.py
    ├── gen_fns
    │   └── gen_fns_shared.py
    ├── ipc_fns
    │   └── mp.py
    ├── json_fns
    │   ├── pydantic_io.py
    │   └── select_tool.py
    ├── latex_fns
    │   ├── latex_actions.py
    │   ├── latex_pickle_io.py
    │   └── latex_toolbox.py
    ├── live_audio
    │   ├── aliyunASR.py
    │   └── audio_io.py
    ├── media_fns
    │   └── get_media.py
    ├── multi_stage
    │   └── multi_stage_utils.py
    ├── pdf_fns
    │   ├── breakdown_txt.py
    │   ├── parse_pdf.py
    │   ├── parse_pdf_grobid.py
    │   ├── parse_pdf_legacy.py
    │   ├── parse_pdf_via_doc2x.py
    │   ├── parse_word.py
    │   ├── report_gen_html.py
    │   ├── report_template.html
    │   └── report_template_v2.html
    ├── plugin_template
    │   └── plugin_class_template.py
    ├── prompts
    │   └── internet.py
    ├── rag_fns
    │   ├── llama_index_worker.py
    │   ├── milvus_worker.py
    │   ├── rag_file_support.py
    │   └── vector_store_index.py
    ├── vector_fns
    │   ├── __init__.py
    │   ├── general_file_loader.py
    │   └── vector_database.py
    ├── vt_fns
    │   ├── vt_call_plugin.py
    │   ├── vt_modify_config.py
    │   └── vt_state.py
    ├── word_dfa
    │   └── dfa_algo.py
    ├── 下载arxiv论文翻译摘要.py
    ├── 互动小游戏.py
    ├── 交互功能函数模板.py
    ├── 函数动态生成.py
    ├── 命令行助手.py
    ├── 多智能体.py
    ├── 总结word文档.py
    ├── 总结音视频.py
    ├── 批量总结PDF文档.py
    ├── 批量总结PDF文档pdfminer.py
    ├── 批量翻译PDF文档_NOUGAT.py
    ├── 数学动画生成manim.py
    ├── 理解PDF文档内容.py
    ├── 生成函数注释.py
    ├── 生成多种Mermaid图表.py
    ├── 知识库问答.py
    ├── 联网的ChatGPT.py
    ├── 联网的ChatGPT_bing版.py
    ├── 虚空终端.py
    ├── 解析JupyterNotebook.py
    ├── 询问多个大语言模型.py
    ├── 语音助手.py
    ├── 读文章写摘要.py
    ├── 谷歌检索小助手.py
    ├── 辅助功能.py
    └── 高级功能函数模板.py
├── docker-compose.yml
├── docs
    ├── Dockerfile+ChatGLM
    ├── Dockerfile+NoLocal+Latex
    ├── GithubAction+AllCapacity
    ├── GithubAction+ChatGLM+Moss
    ├── GithubAction+JittorLLMs
    ├── GithubAction+NoLocal
    ├── GithubAction+NoLocal+AudioAssistant
    ├── GithubAction+NoLocal+Latex
    ├── GithubAction+NoLocal+Vectordb
    ├── README.Arabic.md
    ├── README.English.md
    ├── README.French.md
    ├── README.German.md
    ├── README.Italian.md
    ├── README.Japanese.md
    ├── README.Korean.md
    ├── README.Portuguese.md
    ├── README.Russian.md
    ├── WindowsRun.bat
    ├── WithFastapi.md
    ├── demo.jpg
    ├── demo2.jpg
    ├── logo.png
    ├── plugin_with_secondary_menu.md
    ├── self_analysis.md
    ├── translate_english.json
    ├── translate_japanese.json
    ├── translate_std.json
    ├── translate_traditionalchinese.json
    ├── use_audio.md
    ├── use_azure.md
    ├── use_tts.md
    └── use_vllm.md
├── main.py
├── multi_language.py
├── request_llms
    ├── README.md
    ├── bridge_all.py
    ├── bridge_chatglm.py
    ├── bridge_chatglm3.py
    ├── bridge_chatglm4.py
    ├── bridge_chatglmft.py
    ├── bridge_chatglmonnx.py
    ├── bridge_chatgpt.py
    ├── bridge_chatgpt_vision.py
    ├── bridge_claude.py
    ├── bridge_cohere.py
    ├── bridge_deepseekcoder.py
    ├── bridge_google_gemini.py
    ├── bridge_internlm.py
    ├── bridge_jittorllms_llama.py
    ├── bridge_jittorllms_pangualpha.py
    ├── bridge_jittorllms_rwkv.py
    ├── bridge_llama2.py
    ├── bridge_moonshot.py
    ├── bridge_moss.py
    ├── bridge_newbingfree.py
    ├── bridge_ollama.py
    ├── bridge_openrouter.py
    ├── bridge_qianfan.py
    ├── bridge_qwen.py
    ├── bridge_qwen_local.py
    ├── bridge_skylark2.py
    ├── bridge_spark.py
    ├── bridge_stackclaude.py
    ├── bridge_taichu.py
    ├── bridge_tgui.py
    ├── bridge_zhipu.py
    ├── chatglmoonx.py
    ├── com_google.py
    ├── com_qwenapi.py
    ├── com_skylark2api.py
    ├── com_sparkapi.py
    ├── com_taichu.py
    ├── com_zhipuglm.py
    ├── edge_gpt_free.py
    ├── embed_models
    │   ├── bridge_all_embed.py
    │   └── openai_embed.py
    ├── key_manager.py
    ├── local_llm_class.py
    ├── oai_std_model_template.py
    ├── queued_pipe.py
    ├── requirements_chatglm.txt
    ├── requirements_chatglm4.txt
    ├── requirements_chatglm_onnx.txt
    ├── requirements_jittorllms.txt
    ├── requirements_moss.txt
    ├── requirements_newbing.txt
    ├── requirements_qwen.txt
    ├── requirements_qwen_local.txt
    └── requirements_slackclaude.txt
├── requirements.txt
├── shared_utils
    ├── advanced_markdown_format.py
    ├── char_visual_effect.py
    ├── colorful.py
    ├── config_loader.py
    ├── connect_void_terminal.py
    ├── context_clip_policy.py
    ├── cookie_manager.py
    ├── docker_as_service_api.py
    ├── fastapi_server.py
    ├── handle_upload.py
    ├── key_pattern_manager.py
    ├── logging.py
    ├── map_names.py
    └── text_mask.py
├── tests
    ├── __init__.py
    ├── init_test.py
    ├── test_anim_gen.py
    ├── test_bilibili_down.py
    ├── test_doc2x.py
    ├── test_embed.py
    ├── test_key_pattern_manager.py
    ├── test_latex_auto_correct.py
    ├── test_llms.py
    ├── test_markdown.py
    ├── test_markdown_format.py
    ├── test_media.py
    ├── test_plugins.py
    ├── test_python_auto_docstring.py
    ├── test_rag.py
    ├── test_safe_pickle.py
    ├── test_save_chat_to_html.py
    ├── test_searxng.py
    ├── test_social_helper.py
    ├── test_tts.py
    ├── test_utils.py
    └── test_vector_plugins.py
├── themes
    ├── base64.mjs
    ├── common.css
    ├── common.js
    ├── common.py
    ├── contrast.css
    ├── contrast.py
    ├── cookies.py
    ├── default.css
    ├── default.py
    ├── gradios.py
    ├── green.css
    ├── green.js
    ├── green.py
    ├── gui_advanced_plugin_class.py
    ├── gui_floating_menu.py
    ├── gui_toolbar.py
    ├── init.js
    ├── svg
    │   ├── arxiv.svg
    │   ├── box.svg
    │   ├── brain.svg
    │   ├── check.svg
    │   ├── conf.svg
    │   ├── default.svg
    │   ├── doc.svg
    │   ├── img.svg
    │   ├── location.svg
    │   ├── mm.svg
    │   ├── polish.svg
    │   ├── tts.svg
    │   └── vt.svg
    ├── theme.js
    ├── theme.py
    ├── tts.js
    ├── waifu_plugin
    │   ├── autoload.js
    │   ├── flat-ui-icons-regular.eot
    │   ├── flat-ui-icons-regular.svg
    │   ├── flat-ui-icons-regular.ttf
    │   ├── flat-ui-icons-regular.woff
    │   ├── jquery-ui.min.js
    │   ├── jquery.min.js
    │   ├── live2d.js
    │   ├── source
    │   ├── waifu-tips.js
    │   ├── waifu-tips.json
    │   └── waifu.css
    └── welcome.js
├── toolbox.py
└── version


/.gitattributes:
--------------------------------------------------------------------------------
1 | *.h linguist-detectable=false
2 | *.cpp linguist-detectable=false
3 | *.tex linguist-detectable=false
4 | *.cs linguist-detectable=false
5 | *.tps linguist-detectable=false
6 | 


--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.yml:
--------------------------------------------------------------------------------
 1 | name: Report Bug | 报告BUG
 2 | description: "Report bug"
 3 | title: "[Bug]: "
 4 | labels: []
 5 | body:
 6 |   - type: dropdown
 7 |     id: download
 8 |     attributes:
 9 |       label: Installation Method | 安装方法与平台
10 |       options:
11 |         - Please choose | 请选择
12 |         - Pip Install (I ignored requirements.txt)
13 |         - Pip Install (I used latest requirements.txt)
14 |         - OneKeyInstall (一键安装脚本-windows)
15 |         - OneKeyInstall (一键安装脚本-mac)
16 |         - Anaconda (I ignored requirements.txt)
17 |         - Anaconda (I used latest requirements.txt)
18 |         - Docker(Windows/Mac)
19 |         - Docker(Linux)
20 |         - Docker-Compose(Windows/Mac)
21 |         - Docker-Compose(Linux)
22 |         - Huggingface
23 |         - Others (Please Describe)
24 |     validations:
25 |       required: true
26 | 
27 |   - type: dropdown
28 |     id: version
29 |     attributes:
30 |       label: Version | 版本
31 |       options:
32 |         - Please choose | 请选择
33 |         - Latest | 最新版
34 |         - Others | 非最新版
35 |     validations:
36 |       required: true
37 | 
38 |   - type: dropdown
39 |     id: os
40 |     attributes:
41 |       label: OS | 操作系统
42 |       options:
43 |         - Please choose | 请选择
44 |         - Windows
45 |         - Mac
46 |         - Linux
47 |         - Docker
48 |     validations:
49 |       required: true
50 | 
51 |   - type: textarea
52 |     id: describe
53 |     attributes:
54 |       label: Describe the bug | 简述
55 |       description: Describe the bug | 简述
56 |     validations:
57 |       required: true
58 | 
59 |   - type: textarea
60 |     id: screenshot
61 |     attributes:
62 |       label: Screen Shot | 有帮助的截图
63 |       description: Screen Shot | 有帮助的截图
64 |     validations:
65 |       required: true
66 | 
67 |   - type: textarea
68 |     id: traceback
69 |     attributes:
70 |       label: Terminal Traceback & Material to Help Reproduce Bugs | 终端traceback(如有) + 帮助我们复现的测试材料样本(如有)
71 |       description: Terminal Traceback & Material to Help Reproduce Bugs | 终端traceback(如有) + 帮助我们复现的测试材料样本(如有)
72 | 


--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.yml:
--------------------------------------------------------------------------------
 1 | name: Feature Request | 功能请求
 2 | description: "Feature Request"
 3 | title: "[Feature]: "
 4 | labels: []
 5 | body:
 6 |   - type: dropdown
 7 |     id: download
 8 |     attributes:
 9 |       label: Class | 类型
10 |       options:
11 |         - Please choose | 请选择
12 |         - 其他
13 |         - 函数插件
14 |         - 大语言模型
15 |         - 程序主体
16 |     validations:
17 |       required: false
18 | 
19 |   - type: textarea
20 |     id: traceback
21 |     attributes:
22 |       label: Feature Request | 功能请求
23 |       description: Feature Request | 功能请求
24 | 


--------------------------------------------------------------------------------
/.github/workflows/build-with-all-capacity.yml:
--------------------------------------------------------------------------------
 1 | # https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages
 2 | name: build-with-all-capacity
 3 | 
 4 | on:
 5 |   push:
 6 |     branches:
 7 |       - 'master'
 8 | 
 9 | env:
10 |   REGISTRY: ghcr.io
11 |   IMAGE_NAME: ${{ github.repository }}_with_all_capacity
12 | 
13 | jobs:
14 |   build-and-push-image:
15 |     runs-on: ubuntu-latest
16 |     permissions:
17 |       contents: read
18 |       packages: write
19 | 
20 |     steps:
21 |       - name: Checkout repository
22 |         uses: actions/checkout@v3
23 | 
24 |       - name: Log in to the Container registry
25 |         uses: docker/login-action@v2
26 |         with:
27 |           registry: ${{ env.REGISTRY }}
28 |           username: ${{ github.actor }}
29 |           password: ${{ secrets.GITHUB_TOKEN }}
30 | 
31 |       - name: Extract metadata (tags, labels) for Docker
32 |         id: meta
33 |         uses: docker/metadata-action@v4
34 |         with:
35 |           images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
36 | 
37 |       - name: Build and push Docker image
38 |         uses: docker/build-push-action@v4
39 |         with:
40 |           context: .
41 |           push: true
42 |           file: docs/GithubAction+AllCapacity
43 |           tags: ${{ steps.meta.outputs.tags }}
44 |           labels: ${{ steps.meta.outputs.labels }}
45 | 


--------------------------------------------------------------------------------
/.github/workflows/build-with-audio-assistant.yml:
--------------------------------------------------------------------------------
 1 | # https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages
 2 | name: build-with-audio-assistant
 3 | 
 4 | on:
 5 |   push:
 6 |     branches:
 7 |       - 'master'
 8 | 
 9 | env:
10 |   REGISTRY: ghcr.io
11 |   IMAGE_NAME: ${{ github.repository }}_audio_assistant
12 | 
13 | jobs:
14 |   build-and-push-image:
15 |     runs-on: ubuntu-latest
16 |     permissions:
17 |       contents: read
18 |       packages: write
19 | 
20 |     steps:
21 |       - name: Checkout repository
22 |         uses: actions/checkout@v3
23 | 
24 |       - name: Log in to the Container registry
25 |         uses: docker/login-action@v2
26 |         with:
27 |           registry: ${{ env.REGISTRY }}
28 |           username: ${{ github.actor }}
29 |           password: ${{ secrets.GITHUB_TOKEN }}
30 | 
31 |       - name: Extract metadata (tags, labels) for Docker
32 |         id: meta
33 |         uses: docker/metadata-action@v4
34 |         with:
35 |           images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
36 | 
37 |       - name: Build and push Docker image
38 |         uses: docker/build-push-action@v4
39 |         with:
40 |           context: .
41 |           push: true
42 |           file: docs/GithubAction+NoLocal+AudioAssistant
43 |           tags: ${{ steps.meta.outputs.tags }}
44 |           labels: ${{ steps.meta.outputs.labels }}
45 | 


--------------------------------------------------------------------------------
/.github/workflows/build-with-chatglm.yml:
--------------------------------------------------------------------------------
 1 | # https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages
 2 | name: build-with-chatglm
 3 | 
 4 | on:
 5 |   push:
 6 |     branches:
 7 |       - 'master'
 8 | 
 9 | env:
10 |   REGISTRY: ghcr.io
11 |   IMAGE_NAME: ${{ github.repository }}_chatglm_moss
12 | 
13 | jobs:
14 |   build-and-push-image:
15 |     runs-on: ubuntu-latest
16 |     permissions:
17 |       contents: read
18 |       packages: write
19 | 
20 |     steps:
21 |       - name: Checkout repository
22 |         uses: actions/checkout@v3
23 | 
24 |       - name: Log in to the Container registry
25 |         uses: docker/login-action@v2
26 |         with:
27 |           registry: ${{ env.REGISTRY }}
28 |           username: ${{ github.actor }}
29 |           password: ${{ secrets.GITHUB_TOKEN }}
30 | 
31 |       - name: Extract metadata (tags, labels) for Docker
32 |         id: meta
33 |         uses: docker/metadata-action@v4
34 |         with:
35 |           images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
36 | 
37 |       - name: Build and push Docker image
38 |         uses: docker/build-push-action@v4
39 |         with:
40 |           context: .
41 |           push: true
42 |           file: docs/GithubAction+ChatGLM+Moss
43 |           tags: ${{ steps.meta.outputs.tags }}
44 |           labels: ${{ steps.meta.outputs.labels }}
45 | 


--------------------------------------------------------------------------------
/.github/workflows/build-with-latex-arm.yml:
--------------------------------------------------------------------------------
 1 | # https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages
 2 | name: build-with-latex-arm
 3 | 
 4 | on:
 5 |   push:
 6 |     branches:
 7 |       - "master"
 8 | 
 9 | env:
10 |   REGISTRY: ghcr.io
11 |   IMAGE_NAME: ${{ github.repository }}_with_latex_arm
12 | 
13 | jobs:
14 |   build-and-push-image:
15 |     runs-on: ubuntu-latest
16 |     permissions:
17 |       contents: read
18 |       packages: write
19 | 
20 |     steps:
21 |       - name: Set up QEMU
22 |         uses: docker/setup-qemu-action@v3
23 | 
24 |       - name: Set up Docker Buildx
25 |         uses: docker/setup-buildx-action@v3
26 | 
27 |       - name: Checkout repository
28 |         uses: actions/checkout@v4
29 | 
30 |       - name: Log in to the Container registry
31 |         uses: docker/login-action@v3
32 |         with:
33 |           registry: ${{ env.REGISTRY }}
34 |           username: ${{ github.actor }}
35 |           password: ${{ secrets.GITHUB_TOKEN }}
36 | 
37 |       - name: Extract metadata (tags, labels) for Docker
38 |         id: meta
39 |         uses: docker/metadata-action@v4
40 |         with:
41 |           images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
42 | 
43 |       - name: Build and push Docker image
44 |         uses: docker/build-push-action@v6
45 |         with:
46 |           context: .
47 |           push: true
48 |           platforms: linux/arm64
49 |           file: docs/GithubAction+NoLocal+Latex
50 |           tags: ${{ steps.meta.outputs.tags }}
51 |           labels: ${{ steps.meta.outputs.labels }}


--------------------------------------------------------------------------------
/.github/workflows/build-with-latex.yml:
--------------------------------------------------------------------------------
 1 | # https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages
 2 | name: build-with-latex
 3 | 
 4 | on:
 5 |   push:
 6 |     branches:
 7 |       - 'master'
 8 | 
 9 | env:
10 |   REGISTRY: ghcr.io
11 |   IMAGE_NAME: ${{ github.repository }}_with_latex
12 | 
13 | jobs:
14 |   build-and-push-image:
15 |     runs-on: ubuntu-latest
16 |     permissions:
17 |       contents: read
18 |       packages: write
19 | 
20 |     steps:
21 |       - name: Checkout repository
22 |         uses: actions/checkout@v3
23 | 
24 |       - name: Log in to the Container registry
25 |         uses: docker/login-action@v2
26 |         with:
27 |           registry: ${{ env.REGISTRY }}
28 |           username: ${{ github.actor }}
29 |           password: ${{ secrets.GITHUB_TOKEN }}
30 | 
31 |       - name: Extract metadata (tags, labels) for Docker
32 |         id: meta
33 |         uses: docker/metadata-action@v4
34 |         with:
35 |           images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
36 | 
37 |       - name: Build and push Docker image
38 |         uses: docker/build-push-action@v4
39 |         with:
40 |           context: .
41 |           push: true
42 |           file: docs/GithubAction+NoLocal+Latex
43 |           tags: ${{ steps.meta.outputs.tags }}
44 |           labels: ${{ steps.meta.outputs.labels }}
45 | 


--------------------------------------------------------------------------------
/.github/workflows/build-without-local-llms.yml:
--------------------------------------------------------------------------------
 1 | # https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages
 2 | name: build-without-local-llms
 3 | 
 4 | on:
 5 |   push:
 6 |     branches:
 7 |       - 'master'
 8 | 
 9 | env:
10 |   REGISTRY: ghcr.io
11 |   IMAGE_NAME: ${{ github.repository }}_nolocal
12 | 
13 | jobs:
14 |   build-and-push-image:
15 |     runs-on: ubuntu-latest
16 |     permissions:
17 |       contents: read
18 |       packages: write
19 | 
20 |     steps:
21 |       - name: Checkout repository
22 |         uses: actions/checkout@v3
23 | 
24 |       - name: Log in to the Container registry
25 |         uses: docker/login-action@v2
26 |         with:
27 |           registry: ${{ env.REGISTRY }}
28 |           username: ${{ github.actor }}
29 |           password: ${{ secrets.GITHUB_TOKEN }}
30 | 
31 |       - name: Extract metadata (tags, labels) for Docker
32 |         id: meta
33 |         uses: docker/metadata-action@v4
34 |         with:
35 |           images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
36 | 
37 |       - name: Build and push Docker image
38 |         uses: docker/build-push-action@v4
39 |         with:
40 |           context: .
41 |           push: true
42 |           file: docs/GithubAction+NoLocal
43 |           tags: ${{ steps.meta.outputs.tags }}
44 |           labels: ${{ steps.meta.outputs.labels }}
45 | 


--------------------------------------------------------------------------------
/.github/workflows/conda-pack-windows.yml:
--------------------------------------------------------------------------------
 1 | name: Create Conda Environment Package
 2 | 
 3 | on:
 4 |   workflow_dispatch:
 5 | 
 6 | jobs:
 7 |   build:
 8 |     runs-on: windows-latest
 9 |     
10 |     steps:
11 |     - name: Checkout repository
12 |       uses: actions/checkout@v4
13 |       
14 |     - name: Setup Miniconda
15 |       uses: conda-incubator/setup-miniconda@v3
16 |       with:
17 |         auto-activate-base: true
18 |         activate-environment: ""
19 |         
20 |     - name: Create new Conda environment
21 |       shell: bash -l {0}
22 |       run: |
23 |         conda create -n gpt python=3.11 -y
24 |         conda activate gpt
25 |         
26 |     - name: Install requirements
27 |       shell: bash -l {0}
28 |       run: |
29 |         conda activate gpt
30 |         pip install -r requirements.txt
31 |         
32 |     - name: Install conda-pack
33 |       shell: bash -l {0}
34 |       run: |
35 |         conda activate gpt
36 |         conda install conda-pack -y
37 |         
38 |     - name: Pack conda environment
39 |       shell: bash -l {0}
40 |       run: |
41 |         conda activate gpt
42 |         conda pack -n gpt -o gpt.tar.gz
43 |       
44 |     - name: Create workspace zip
45 |       shell: pwsh
46 |       run: |
47 |         mkdir workspace
48 |         Get-ChildItem -Exclude "workspace" | Copy-Item -Destination workspace -Recurse
49 |         Remove-Item -Path workspace/.git* -Recurse -Force -ErrorAction SilentlyContinue
50 |         Copy-Item gpt.tar.gz workspace/ -Force
51 |     
52 |     - name: Upload packed files
53 |       uses: actions/upload-artifact@v4
54 |       with:
55 |         name: gpt-academic-package
56 |         path: workspace
57 | 


--------------------------------------------------------------------------------
/.github/workflows/stale.yml:
--------------------------------------------------------------------------------
 1 | # This workflow warns and then closes issues and PRs that have had no activity for a specified amount of time.
 2 | #
 3 | # You can adjust the behavior by modifying this file.
 4 | # For more information, see:
 5 | # https://github.com/actions/stale
 6 | 
 7 | name: 'Close stale issues and PRs'
 8 | on:
 9 |   schedule:
10 |     - cron: '*/30 * * * *'
11 | 
12 | jobs:
13 |   stale:
14 |     runs-on: ubuntu-latest
15 |     permissions:
16 |       issues: write
17 |       pull-requests: read
18 | 
19 |     steps:
20 |       - uses: actions/stale@v8
21 |         with:
22 |           stale-issue-message: 'This issue is stale because it has been open 100 days with no activity. Remove stale label or comment or this will be closed in 7 days.'
23 |           days-before-stale: 100
24 |           days-before-close: 7
25 | 


--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
  1 | # Byte-compiled / optimized / DLL files
  2 | __pycache__/
  3 | *.py[cod]
  4 | *$py.class
  5 | 
  6 | # C extensions
  7 | *.so
  8 | 
  9 | # Distribution / packaging
 10 | .Python
 11 | build/
 12 | develop-eggs/
 13 | dist/
 14 | downloads/
 15 | eggs/
 16 | .eggs/
 17 | lib/
 18 | lib64/
 19 | parts/
 20 | sdist/
 21 | var/
 22 | wheels/
 23 | pip-wheel-metadata/
 24 | share/python-wheels/
 25 | *.egg-info/
 26 | .installed.cfg
 27 | *.egg
 28 | MANIFEST
 29 | 
 30 | # PyInstaller
 31 | #  Usually these files are written by a python script from a template
 32 | #  before PyInstaller builds the exe, so as to inject date/other infos into it.
 33 | *.manifest
 34 | *.spec
 35 | # Installer logs
 36 | pip-log.txt
 37 | pip-delete-this-directory.txt
 38 | 
 39 | # Unit test / coverage reports
 40 | htmlcov/
 41 | .tox/
 42 | .nox/
 43 | .coverage
 44 | .coverage.*
 45 | .cache
 46 | nosetests.xml
 47 | coverage.xml
 48 | *.cover
 49 | *.py,cover
 50 | .hypothesis/
 51 | .pytest_cache/
 52 | 
 53 | # Translations
 54 | *.mo
 55 | *.pot
 56 | github
 57 | .github
 58 | TEMP
 59 | TRASH
 60 | 
 61 | # Django stuff:
 62 | *.log
 63 | local_settings.py
 64 | db.sqlite3
 65 | db.sqlite3-journal
 66 | 
 67 | # Flask stuff:
 68 | instance/
 69 | .webassets-cache
 70 | 
 71 | # Scrapy stuff:
 72 | .scrapy
 73 | 
 74 | # Sphinx documentation
 75 | docs/_build/
 76 | 
 77 | # PyBuilder
 78 | target/
 79 | 
 80 | # Jupyter Notebook
 81 | .ipynb_checkpoints
 82 | 
 83 | # IPython
 84 | profile_default/
 85 | ipython_config.py
 86 | 
 87 | # pyenv
 88 | .python-version
 89 | 
 90 | # pipenv
 91 | #   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
 92 | #   However, in case of collaboration, if having platform-specific dependencies or dependencies
 93 | #   having no cross-platform support, pipenv may install dependencies that don't work, or not
 94 | #   install all needed dependencies.
 95 | #Pipfile.lock
 96 | 
 97 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
 98 | __pypackages__/
 99 | 
100 | # Celery stuff
101 | celerybeat-schedule
102 | celerybeat.pid
103 | 
104 | # SageMath parsed files
105 | *.sage.py
106 | 
107 | # Environments
108 | .env
109 | .venv
110 | env/
111 | venv/
112 | ENV/
113 | env.bak/
114 | venv.bak/
115 | 
116 | # Spyder project settings
117 | .spyderproject
118 | .spyproject
119 | 
120 | # Rope project settings
121 | .ropeproject
122 | 
123 | # mkdocs documentation
124 | /site
125 | 
126 | # mypy
127 | .mypy_cache/
128 | .dmypy.json
129 | dmypy.json
130 | 
131 | # Pyre type checker
132 | .pyre/
133 | 
134 | # macOS files
135 | .DS_Store
136 | 
137 | .vscode
138 | .idea
139 | 
140 | history
141 | ssr_conf
142 | config_private.py
143 | gpt_log
144 | private.md
145 | private_upload
146 | other_llms
147 | cradle*
148 | debug*
149 | private*
150 | crazy_functions/test_project/pdf_and_word
151 | crazy_functions/test_samples
152 | request_llms/jittorllms
153 | multi-language
154 | request_llms/moss
155 | media
156 | flagged
157 | request_llms/ChatGLM-6b-onnx-u8s8
158 | .pre-commit-config.yaml
159 | test.*
160 | temp.*
161 | objdump*
162 | *.min.*.js
163 | TODO
164 | experimental_mods
165 | search_results
166 | 


--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
 1 | # This Dockerfile builds a minimal runtime environment without local models
 2 | # If you need local models such as chatglm, or the latex toolchain, refer to docker-compose.yml instead
 3 | # - How to build: edit `config.py` first, then run `docker build -t gpt-academic . `
 4 | # - How to run (on Linux): `docker run --rm -it --net=host gpt-academic `
 5 | # - How to run (other OSes, pick any fixed port such as 50923): `docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic `
 6 | 
 7 | FROM ghcr.io/astral-sh/uv:python3.12-bookworm
 8 | 
 9 | # Optional step: switch the pip mirror (the next three lines can be removed)
10 | RUN echo '[global]' > /etc/pip.conf && \
11 |     echo 'index-url = https://mirrors.aliyun.com/pypi/simple/' >> /etc/pip.conf && \
12 |     echo 'trusted-host = mirrors.aliyun.com' >> /etc/pip.conf
13 | 
14 | # Voice output support (the first two lines switch to the Aliyun apt mirror, the next two install ffmpeg; all four can be removed)
15 | RUN sed -i 's/deb.debian.org/mirrors.aliyun.com/g' /etc/apt/sources.list.d/debian.sources && \
16 |     sed -i 's/security.debian.org/mirrors.aliyun.com/g' /etc/apt/sources.list.d/debian.sources && \
17 |     apt-get update
18 | RUN apt-get install ffmpeg -y
19 | RUN apt-get clean
20 | 
21 | # Enter the working directory (required)
22 | WORKDIR /gpt
23 | 
24 | # Install most dependencies first, leveraging the Docker layer cache to speed up later builds (the next two lines can be removed)
25 | COPY requirements.txt ./
26 | RUN uv venv --python=3.12 && uv pip install --verbose -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
27 | ENV PATH="/gpt/.venv/bin:$PATH"
28 | RUN python -c 'import loguru'
29 | 
30 | # Copy the project files and install the remaining dependencies (required)
31 | COPY . .
32 | RUN uv venv --python=3.12 && uv pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
33 | 
34 | # Optional step: warm up modules (can be removed)
35 | RUN python -c 'from check_proxy import warm_up_modules; warm_up_modules()'
36 | 
37 | # Launch (required)
38 | CMD ["bash", "-c", "python main.py"]
39 | 


--------------------------------------------------------------------------------
/crazy_functions/Image_Generate_Wrap.py:
--------------------------------------------------------------------------------
 1 | 
 2 | from toolbox import get_conf, update_ui
 3 | from crazy_functions.Image_Generate import 图片生成_DALLE2, 图片生成_DALLE3, 图片修改_DALLE2
 4 | from crazy_functions.plugin_template.plugin_class_template import GptAcademicPluginTemplate, ArgProperty
 5 | 
 6 | 
 7 | class ImageGen_Wrap(GptAcademicPluginTemplate):
 8 |     def __init__(self):
 9 |         """
10 |         Note: `execute` runs in a separate thread, so be very careful when defining and using class variables!
11 |         """
12 |         pass
13 | 
14 |     def define_arg_selection_menu(self):
15 |         """
16 |         Define the plugin's secondary option menu
17 | 
18 |         The first argument, named `main_input`: `type` declares a text box; `title` is shown above the box, `description` inside it, and `default_value` is the default;
19 |         The second argument, named `advanced_arg`: `type` declares a text box; `title` is shown above the box, `description` inside it, and `default_value` is the default;
20 | 
21 |         """
22 |         gui_definition = {
23 |             "main_input":
24 |                 ArgProperty(title="输入图片描述", description="需要生成图像的文本描述,尽量使用英文", default_value="", type="string").model_dump_json(), # main input, synced automatically from the input box
25 |             "model_name":
26 |                 ArgProperty(title="模型", options=["DALLE2", "DALLE3"], default_value="DALLE3", description="无", type="dropdown").model_dump_json(),
27 |             "resolution":
28 |                 ArgProperty(title="分辨率", options=["256x256(限DALLE2)", "512x512(限DALLE2)", "1024x1024", "1792x1024(限DALLE3)", "1024x1792(限DALLE3)"], default_value="1024x1024", description="无", type="dropdown").model_dump_json(),
29 |             "quality (仅DALLE3生效)":
30 |                 ArgProperty(title="质量", options=["standard", "hd"], default_value="standard", description="无", type="dropdown").model_dump_json(),
31 |             "style (仅DALLE3生效)":
32 |                 ArgProperty(title="风格", options=["vivid", "natural"], default_value="vivid", description="无", type="dropdown").model_dump_json(),
33 | 
34 |         }
35 |         return gui_definition
36 | 
37 |     def execute(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
38 |         """
39 |         Execute the plugin
40 |         """
41 |         # Resolution
42 |         resolution = plugin_kwargs["resolution"].replace("(限DALLE2)", "").replace("(限DALLE3)", "")
43 | 
44 |         if plugin_kwargs["model_name"] == "DALLE2":
45 |             plugin_kwargs["advanced_arg"] = resolution
46 |             yield from 图片生成_DALLE2(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request)
47 | 
48 |         elif plugin_kwargs["model_name"] == "DALLE3":
49 |             quality = plugin_kwargs["quality (仅DALLE3生效)"]
50 |             style = plugin_kwargs["style (仅DALLE3生效)"]
51 |             plugin_kwargs["advanced_arg"] = f"{resolution}-{quality}-{style}"
52 |             yield from 图片生成_DALLE3(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request)
53 | 
54 |         else:
55 |             chatbot.append([None, "抱歉,找不到该模型"])
56 |             yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
57 | 


--------------------------------------------------------------------------------
/crazy_functions/Internet_GPT_Wrap.py:
--------------------------------------------------------------------------------
 1 | import random
 2 | from toolbox import get_conf
 3 | from crazy_functions.Internet_GPT import 连接网络回答问题
 4 | from crazy_functions.plugin_template.plugin_class_template import GptAcademicPluginTemplate, ArgProperty
 5 | 
 6 | 
 7 | class NetworkGPT_Wrap(GptAcademicPluginTemplate):
 8 |     def __init__(self):
 9 |         """
10 |         Note: `execute` runs in a separate thread, so be very careful when defining and using class variables!
11 |         """
12 |         pass
13 | 
14 |     def define_arg_selection_menu(self):
15 |         """
16 |         Define the plugin's secondary option menu
17 | 
18 |         The first argument, named `main_input`: `type` declares a text box; `title` is shown above the box, `description` inside it, and `default_value` is the default;
19 |         The second argument, named `advanced_arg`: `type` declares a text box; `title` is shown above the box, `description` inside it, and `default_value` is the default;
20 |         The third argument, named `allow_cache`: `type` declares a dropdown menu; `title` + `description` are shown above it, `options` lists its choices, and `default_value` is its default;
21 | 
22 |         """
23 |         urls = get_conf("SEARXNG_URLS")
24 |         url = random.choice(urls)
25 | 
26 |         gui_definition = {
27 |             "main_input":
28 |                 ArgProperty(title="输入问题", description="待通过互联网检索的问题,会自动读取输入框内容", default_value="", type="string").model_dump_json(), # main input, synced automatically from the input box
29 |             "categories":
30 |                 ArgProperty(title="搜索分类", options=["网页", "学术论文"], default_value="网页", description="无", type="dropdown").model_dump_json(),
31 |             "engine":
32 |                 ArgProperty(title="选择搜索引擎", options=["Mixed", "bing", "google", "duckduckgo"], default_value="google", description="无", type="dropdown").model_dump_json(),
33 |             "optimizer":
34 |                 ArgProperty(title="搜索优化", options=["关闭", "开启", "开启(增强)"], default_value="关闭", description="是否使用搜索增强。注意这可能会消耗较多token", type="dropdown").model_dump_json(),
35 |             "searxng_url":
36 |                 ArgProperty(title="Searxng服务地址", description="输入Searxng的地址", default_value=url, type="string").model_dump_json(), # synced automatically from the input box
37 | 
38 |         }
39 |         return gui_definition
40 | 
41 |     def execute(txt, llm_kwargs, plugin_kwargs:dict, chatbot, history, system_prompt, user_request):
42 |         """
43 |         Execute the plugin
44 |         """
45 |         if plugin_kwargs.get("categories", None) == "网页": plugin_kwargs["categories"] = "general"
46 |         elif plugin_kwargs.get("categories", None) == "学术论文": plugin_kwargs["categories"] = "science"
47 |         else: plugin_kwargs["categories"] = "general"
48 |         yield from 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request)
49 | 
50 | 


--------------------------------------------------------------------------------
/crazy_functions/PDF_Translate_Wrap.py:
--------------------------------------------------------------------------------
 1 | from crazy_functions.plugin_template.plugin_class_template import GptAcademicPluginTemplate, ArgProperty
 2 | from .PDF_Translate import 批量翻译PDF文档
 3 | 
 4 | 
 5 | class PDF_Tran(GptAcademicPluginTemplate):
 6 |     def __init__(self):
 7 |         """
 8 |         Note: `execute` runs in a separate thread, so be very careful when defining and using class variables!
 9 |         """
10 |         pass
11 | 
12 |     def define_arg_selection_menu(self):
13 |         """
14 |         Define the plugin's secondary option menu
15 |         """
16 |         gui_definition = {
17 |             "main_input":
18 |                 ArgProperty(title="PDF文件路径", description="未指定路径,请上传文件后,再点击该插件", default_value="", type="string").model_dump_json(), # main input, synced automatically from the input box
19 |             "additional_prompt":
20 |                 ArgProperty(title="额外提示词", description="例如:对专有名词、翻译语气等方面的要求", default_value="", type="string").model_dump_json(), # advanced argument input area, synced automatically
21 |             "pdf_parse_method":
22 |                 ArgProperty(title="PDF解析方法", options=["DOC2X", "GROBID", "Classic"], description="无", default_value="GROBID", type="dropdown").model_dump_json(),
23 |         }
24 |         return gui_definition
25 | 
26 |     def execute(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
27 |         """
28 |         Execute the plugin
29 |         """
30 |         main_input = plugin_kwargs["main_input"]
31 |         additional_prompt = plugin_kwargs["additional_prompt"]
32 |         pdf_parse_method = plugin_kwargs["pdf_parse_method"]
33 |         yield from 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request)


--------------------------------------------------------------------------------
/crazy_functions/SourceCode_Comment_Wrap.py:
--------------------------------------------------------------------------------
 1 | 
 2 | from toolbox import get_conf, update_ui
 3 | from crazy_functions.plugin_template.plugin_class_template import GptAcademicPluginTemplate, ArgProperty
 4 | from crazy_functions.SourceCode_Comment import 注释Python项目
 5 | 
 6 | class SourceCodeComment_Wrap(GptAcademicPluginTemplate):
 7 |     def __init__(self):
 8 |         """
 9 |         Note: `execute` runs in a separate thread, so be very careful when defining and using class variables!
10 |         """
11 |         pass
12 | 
13 |     def define_arg_selection_menu(self):
14 |         """
15 |         Define the plugin's secondary option menu
16 |         """
17 |         gui_definition = {
18 |             "main_input":
19 |                 ArgProperty(title="路径", description="程序路径(上传文件后自动填写)", default_value="", type="string").model_dump_json(), # main input, synced automatically from the input box
20 |             "use_chinese":
21 |                 ArgProperty(title="注释语言", options=["英文", "中文"], default_value="英文", description="无", type="dropdown").model_dump_json(),
22 |             # "use_emoji":
23 |                 # ArgProperty(title="在注释中使用emoji", options=["禁止", "允许"], default_value="禁止", description="无", type="dropdown").model_dump_json(),
24 |         }
25 |         return gui_definition
26 | 
27 |     def execute(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
28 |         """
29 |         Execute the plugin
30 |         """
31 |         if plugin_kwargs["use_chinese"] == "中文": 
32 |             plugin_kwargs["use_chinese"] = True
33 |         else: 
34 |             plugin_kwargs["use_chinese"] = False
35 | 
36 |         yield from 注释Python项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request)
37 | 


--------------------------------------------------------------------------------
/crazy_functions/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/binary-husky/gpt_academic/a7a56b5058fc8e69641e113f615aed8ab3a59a64/crazy_functions/__init__.py


--------------------------------------------------------------------------------
/crazy_functions/agent_fns/auto_agent.py:
--------------------------------------------------------------------------------
 1 | from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc, ProxyNetworkActivate
 2 | from toolbox import report_exception, get_log_folder, update_ui_latest_msg, Singleton
 3 | from crazy_functions.agent_fns.pipe import PluginMultiprocessManager, PipeCom
 4 | from crazy_functions.agent_fns.general import AutoGenGeneral
 5 | 
 6 | 
 7 | 
 8 | class AutoGenMath(AutoGenGeneral):
 9 | 
10 |     def define_agents(self):
11 |         from autogen import AssistantAgent, UserProxyAgent
12 |         return [
13 |             {
14 |                 "name": "assistant",            # name of the agent.
15 |                 "cls":  AssistantAgent,         # class of the agent.
16 |             },
17 |             {
18 |                 "name": "user_proxy",           # name of the agent.
19 |                 "cls":  UserProxyAgent,         # class of the agent.
20 |                 "human_input_mode": "ALWAYS",   # always ask for human input.
21 |                 "llm_config": False,            # disables llm-based auto reply.
22 |             },
23 |         ]


--------------------------------------------------------------------------------
/crazy_functions/agent_fns/echo_agent.py:
--------------------------------------------------------------------------------
 1 | from crazy_functions.agent_fns.pipe import PluginMultiprocessManager, PipeCom
 2 | from loguru import logger
 3 | 
 4 | class EchoDemo(PluginMultiprocessManager):
 5 |     def subprocess_worker(self, child_conn):
 6 |         # ⭐⭐ child process
 7 |         self.child_conn = child_conn
 8 |         while True:
 9 |             msg = self.child_conn.recv() # PipeCom
10 |             if msg.cmd == "user_input":
11 |                 # wait for user input from the parent process
12 |                 self.child_conn.send(PipeCom("show", msg.content))
13 |                 wait_success = self.subprocess_worker_wait_user_feedback(wait_msg="我准备好处理下一个问题了.")
14 |                 if not wait_success:
15 |                     # wait timeout, terminate this subprocess_worker
16 |                     break
17 |             elif msg.cmd == "terminate":
18 |                 self.child_conn.send(PipeCom("done", ""))
19 |                 break
20 |         logger.info('[debug] subprocess_worker terminated')


--------------------------------------------------------------------------------
/crazy_functions/agent_fns/persistent.py:
--------------------------------------------------------------------------------
 1 | from toolbox import Singleton
 2 | @Singleton
 3 | class GradioMultiuserManagerForPersistentClasses():
 4 |     def __init__(self):
 5 |         self.mapping = {}
 6 | 
 7 |     def already_alive(self, key):
 8 |         return (key in self.mapping) and (self.mapping[key].is_alive())
 9 | 
10 |     def set(self, key, x):
11 |         self.mapping[key] = x
12 |         return self.mapping[key]
13 | 
14 |     def get(self, key):
15 |         return self.mapping[key]
16 | 
17 | 
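
A minimal usage sketch (an illustration, not part of the repository): the singleton keeps one live worker per key across Gradio sessions, and the stored object is expected to expose an `is_alive()` method. The key and `make_worker` below are hypothetical.

    mgr = GradioMultiuserManagerForPersistentClasses()
    if not mgr.already_alive("user-123"):      # "user-123" is a hypothetical session key
        mgr.set("user-123", make_worker())     # make_worker() is hypothetical; its result must expose is_alive()
    worker = mgr.get("user-123")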


--------------------------------------------------------------------------------
/crazy_functions/agent_fns/python_comment_compare.html:
--------------------------------------------------------------------------------
 1 | <!DOCTYPE html>
 2 | <html lang="zh-CN">
 3 | <head>
 4 |     <style>ADVANCED_CSS</style>
 5 |     <meta charset="UTF-8">
 6 |     <title>源文件对比</title>
 7 |     <style>
 8 |         body {
 9 |             font-family: Arial, sans-serif;
10 |             display: flex;
11 |             justify-content: center;
12 |             align-items: center;
13 |             height: 100vh;
14 |             margin: 0;
15 |         }
16 |         .container {
17 |             display: flex;
18 |             width: 95%;
19 |             height: -webkit-fill-available;
20 |         }
21 |         .code-container {
22 |             flex: 1;
23 |             margin: 0px;
24 |             padding: 0px;
25 |             border: 1px solid #ccc;
26 |             background-color: #f9f9f9;
27 |             overflow: auto;
28 |         }
29 |         pre {
30 |             white-space: pre-wrap;
31 |             word-wrap: break-word;
32 |         }
33 |     </style>
34 | </head>
35 | <body>
36 | <div class="container">
37 | <div class="code-container">
38 | REPLACE_CODE_FILE_LEFT
39 | </div>
40 | <div class="code-container">
41 | REPLACE_CODE_FILE_RIGHT
42 | </div>
43 | </div>
44 | </body>
45 | </html>


--------------------------------------------------------------------------------
/crazy_functions/agent_fns/watchdog.py:
--------------------------------------------------------------------------------
 1 | import threading, time
 2 | from loguru import logger
 3 | 
 4 | class WatchDog():
 5 |     def __init__(self, timeout, bark_fn, interval=3, msg="") -> None:
 6 |         self.last_feed = None
 7 |         self.timeout = timeout
 8 |         self.bark_fn = bark_fn
 9 |         self.interval = interval
10 |         self.msg = msg
11 |         self.kill_dog = False
12 | 
13 |     def watch(self):
14 |         while True:
15 |             if self.kill_dog: break
16 |             if time.time() - self.last_feed > self.timeout:
17 |                 if len(self.msg) > 0: logger.info(self.msg)
18 |                 self.bark_fn()
19 |                 break
20 |             time.sleep(self.interval)
21 | 
22 |     def begin_watch(self):
23 |         self.last_feed = time.time()
24 |         th = threading.Thread(target=self.watch)
25 |         th.daemon = True
26 |         th.start()
27 | 
28 |     def feed(self):
29 |         self.last_feed = time.time()
30 | 
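A minimal usage sketch (an illustration, not part of the repository file): `bark_fn` fires once `feed()` has not been called within `timeout` seconds.

    import time
    from crazy_functions.agent_fns.watchdog import WatchDog

    dog = WatchDog(timeout=5, bark_fn=lambda: print("watchdog fired"), interval=1, msg="no heartbeat")
    dog.begin_watch()       # starts the daemon watcher thread and records the first feed
    for _ in range(3):
        time.sleep(2)
        dog.feed()          # heartbeat; drop this call and bark_fn fires after ~5 s
    dog.kill_dog = True     # ask the watcher thread to exit cleanly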


--------------------------------------------------------------------------------
/crazy_functions/ast_fns/comment_remove.py:
--------------------------------------------------------------------------------
 1 | import token
 2 | import tokenize
 3 | import copy
 4 | import io
 5 | 
 6 | 
 7 | def remove_python_comments(input_source: str) -> str:
 8 |     source_flag = copy.copy(input_source)
 9 |     source = io.StringIO(input_source)
10 |     ls = input_source.split('\n')
11 |     prev_toktype = token.INDENT
12 |     readline = source.readline
13 | 
14 |     def get_char_index(lineno, col):
15 |         # find the index of the char in the source code
16 |         if lineno == 1:
17 |             return len('\n'.join(ls[:(lineno-1)])) + col
18 |         else:
19 |             return len('\n'.join(ls[:(lineno-1)])) + col + 1
20 | 
21 |     def replace_char_between(start_lineno, start_col, end_lineno, end_col, source, replace_char, ls):
22 |         # replace char between start_lineno, start_col and end_lineno, end_col with replace_char, but keep '\n' and ' '
23 |         b = get_char_index(start_lineno, start_col)
24 |         e = get_char_index(end_lineno, end_col)
25 |         for i in range(b, e):
26 |             if source[i] == '\n':
27 |                 source = source[:i] + '\n' + source[i+1:]
28 |             elif source[i] == ' ':
29 |                 source = source[:i] + ' ' + source[i+1:]
30 |             else:
31 |                 source = source[:i] + replace_char + source[i+1:]
32 |         return source
33 | 
34 |     tokgen = tokenize.generate_tokens(readline)
35 |     for toktype, ttext, (slineno, scol), (elineno, ecol), ltext in tokgen:
36 |         if toktype == token.STRING and (prev_toktype == token.INDENT):
37 |             source_flag = replace_char_between(slineno, scol, elineno, ecol, source_flag, ' ', ls)
38 |         elif toktype == token.STRING and (prev_toktype == token.NEWLINE):
39 |             source_flag = replace_char_between(slineno, scol, elineno, ecol, source_flag, ' ', ls)
40 |         elif toktype == tokenize.COMMENT:
41 |             source_flag = replace_char_between(slineno, scol, elineno, ecol, source_flag, ' ', ls)
42 |         prev_toktype = toktype
43 |     return source_flag
44 | 
45 | 
46 | # Example usage
47 | if __name__ == "__main__":
48 |     with open("source.py", "r", encoding="utf-8") as f:
49 |         source_code = f.read()
50 | 
51 |     cleaned_code = remove_python_comments(source_code)
52 | 
53 |     with open("cleaned_source.py", "w", encoding="utf-8") as f:
54 |         f.write(cleaned_code)


--------------------------------------------------------------------------------
/crazy_functions/doc_fns/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/binary-husky/gpt_academic/a7a56b5058fc8e69641e113f615aed8ab3a59a64/crazy_functions/doc_fns/__init__.py


--------------------------------------------------------------------------------
/crazy_functions/doc_fns/conversation_doc/markdown_doc.py:
--------------------------------------------------------------------------------
 1 | 
 2 | class MarkdownFormatter:
 3 |     """Markdown document generator - renders a conversation history as a markdown document"""
 4 | 
 5 |     def __init__(self):
 6 |         self.content = []
 7 | 
 8 |     def _add_content(self, text: str):
 9 |         """Append body text"""
10 |         if text:
11 |             self.content.append(f"\n{text}\n")
12 | 
13 |     def create_document(self, history: list) -> str:
14 |         """
15 |         Create the complete Markdown document
16 |         Args:
17 |             history: history list; even indices hold questions, odd indices hold answers
18 |         Returns:
19 |             str: the generated Markdown text
20 |         """
21 |         self.content = []
22 |         
23 |         # Process the question-answer pairs
24 |         for i in range(0, len(history), 2):
25 |             question = history[i]
26 |             answer = history[i + 1] if i + 1 < len(history) else ""
27 |             
28 |             # Add the question
29 |             self.content.append(f"\n### 问题 {i//2 + 1}")
30 |             self._add_content(question)
31 |             
32 |             # Add the answer
33 |             self.content.append(f"\n### 回答 {i//2 + 1}")
34 |             self._add_content(answer)
35 |             
36 |             # Add a separator line
37 |             self.content.append("\n---\n")
38 | 
39 |         return "\n".join(self.content)
40 | 
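A minimal usage sketch (an illustration, not part of the repository): even indices of `history` hold questions, odd indices hold answers.

    fmt = MarkdownFormatter()
    md = fmt.create_document(["What is attention?", "Attention weighs pairwise token interactions."])
    print(md)   # renders "### 问题 1" / "### 回答 1" sections separated by horizontal rules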


--------------------------------------------------------------------------------
/crazy_functions/doc_fns/conversation_doc/txt_doc.py:
--------------------------------------------------------------------------------
 1 | 
 2 | import re
 3 | 
 4 | 
 5 | def convert_markdown_to_txt(markdown_text):
 6 |     """Convert markdown text to plain text while preserving formatting"""
 7 |     # Standardize line endings
 8 |     markdown_text = markdown_text.replace('\r\n', '\n').replace('\r', '\n')
 9 | 
10 |     # 1. Handle headers but keep their formatting instead of removing them
11 |     markdown_text = re.sub(r'^#\s+(.+)$', r'# \1', markdown_text, flags=re.MULTILINE)
12 |     markdown_text = re.sub(r'^##\s+(.+)$', r'## \1', markdown_text, flags=re.MULTILINE)
13 |     markdown_text = re.sub(r'^###\s+(.+)$', r'### \1', markdown_text, flags=re.MULTILINE)
14 | 
15 |     # 2. Handle bold and italic - simply remove markers
16 |     markdown_text = re.sub(r'\*\*(.+?)\*\*', r'\1', markdown_text)
17 |     markdown_text = re.sub(r'\*(.+?)\*', r'\1', markdown_text)
18 | 
19 |     # 3. Handle lists but preserve formatting
20 |     markdown_text = re.sub(r'^\s*[-*+]\s+(.+?)(?=\n|$)', r'• \1', markdown_text, flags=re.MULTILINE)
21 | 
22 |     # 4. Handle links - keep the text and append the URL in parentheses
23 |     markdown_text = re.sub(r'\[([^\]]+)\]\(([^)]+)\)', r'\1 (\2)', markdown_text)
24 | 
25 |     # 5. Handle HTML links - convert to user-friendly format
26 |     markdown_text = re.sub(r'<a href=[\'"]([^\'"]+)[\'"](?:\s+target=[\'"][^\'"]+[\'"])?>([^<]+)</a>', r'\2 (\1)',
27 |                            markdown_text)
28 | 
29 |     # 6. Preserve paragraph breaks
30 |     markdown_text = re.sub(r'\n{3,}', '\n\n', markdown_text)  # normalize multiple newlines to double newlines
31 | 
32 |     # 7. Clean up extra spaces but maintain indentation
33 |     markdown_text = re.sub(r' +', ' ', markdown_text)
34 | 
35 |     return markdown_text.strip()
36 | 
37 | 
38 | class TxtFormatter:
39 |     """Chat history TXT document generator"""
40 | 
41 |     def __init__(self):
42 |         self.content = []
43 |         self._setup_document()
44 | 
45 |     def _setup_document(self):
46 |         """Initialize document with header"""
47 |         self.content.append("=" * 50)
48 |         self.content.append("GPT-Academic对话记录".center(48))
49 |         self.content.append("=" * 50)
50 | 
51 |     def _format_header(self):
52 |         """Create document header with current date"""
53 |         from datetime import datetime
54 |         date_str = datetime.now().strftime('%Y年%m月%d日')
55 |         return [
56 |             date_str.center(48),
57 |             "\n"  # Add blank line after date
58 |         ]
59 | 
60 |     def create_document(self, history):
61 |         """Generate document from chat history"""
62 |         # Add header with date
63 |         self.content.extend(self._format_header())
64 | 
65 |         # Add conversation content
66 |         for i in range(0, len(history), 2):
67 |             question = history[i]
68 |             answer = convert_markdown_to_txt(history[i + 1]) if i + 1 < len(history) else ""
69 | 
70 |             if question:
71 |                 self.content.append(f"问题 {i // 2 + 1}:{str(question)}")
72 |                 self.content.append("")  # Add blank line
73 | 
74 |             if answer:
75 |                 self.content.append(f"回答 {i // 2 + 1}:{str(answer)}")
76 |                 self.content.append("")  # Add blank line
77 | 
78 |         # Join all content with newlines
79 |         return "\n".join(self.content)
80 | 
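A minimal sketch (an illustration, not part of the repository) of what `convert_markdown_to_txt` does to emphasis, links and list markers:

    sample = "**Bold** text with a [link](https://example.com)\n- item one"
    print(convert_markdown_to_txt(sample))
    # Bold text with a link (https://example.com)
    # • item one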


--------------------------------------------------------------------------------
/crazy_functions/doc_fns/read_fns/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/binary-husky/gpt_academic/a7a56b5058fc8e69641e113f615aed8ab3a59a64/crazy_functions/doc_fns/read_fns/__init__.py


--------------------------------------------------------------------------------
/crazy_functions/doc_fns/read_fns/docx_reader.py:
--------------------------------------------------------------------------------
1 | import nltk
2 | nltk.data.path.append('~/nltk_data')
3 | nltk.download('averaged_perceptron_tagger', download_dir='~/nltk_data')
4 | nltk.download('punkt', download_dir='~/nltk_data')


--------------------------------------------------------------------------------
/crazy_functions/doc_fns/read_fns/unstructured_all/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/binary-husky/gpt_academic/a7a56b5058fc8e69641e113f615aed8ab3a59a64/crazy_functions/doc_fns/read_fns/unstructured_all/__init__.py


--------------------------------------------------------------------------------
/crazy_functions/doc_fns/read_fns/unstructured_all/unstructured_md.py:
--------------------------------------------------------------------------------
 1 | from pathlib import Path
 2 | from crazy_functions.doc_fns.read_fns.unstructured_all.paper_structure_extractor import PaperStructureExtractor
 3 | 
 4 | def extract_and_save_as_markdown(paper_path, output_path=None):
 5 |     """
 6 |     Extract the paper structure and save it in Markdown format
 7 |     
 8 |     Args:
 9 |         paper_path: path to the paper file
10 |         output_path: path of the output Markdown file; if omitted, the input file name is reused with an .md extension
11 |     
12 |     Returns:
13 |         Path of the saved Markdown file
14 |     """
15 |     # Create the extractor
16 |     extractor = PaperStructureExtractor()
17 |     
18 |     # Resolve the file path
19 |     paper_path = Path(paper_path)
20 |     
21 |     # If no output path is given, reuse the file name with an .md extension
22 |     if output_path is None:
23 |         output_path = paper_path.with_suffix('.md')
24 |     else:
25 |         output_path = Path(output_path)
26 |     
27 |     # Make sure the output directory exists
28 |     output_path.parent.mkdir(parents=True, exist_ok=True)
29 |     
30 |     print(f"正在处理论文: {paper_path}")
31 |     
32 |     try:
33 |         # Extract the paper structure
34 |         paper = extractor.extract_paper_structure(paper_path)
35 |         
36 |         # Generate the Markdown content
37 |         markdown_content = extractor.generate_markdown(paper)
38 |         
39 |         # Save to file
40 |         with open(output_path, 'w', encoding='utf-8') as f:
41 |             f.write(markdown_content)
42 |         
43 |         print(f"已成功保存Markdown文件: {output_path}")
44 |         
45 |         # Print summary information
46 |         print("\n论文摘要信息:")
47 |         print(f"标题: {paper.metadata.title}")
48 |         print(f"作者: {', '.join(paper.metadata.authors)}")
49 |         print(f"关键词: {', '.join(paper.keywords)}")
50 |         print(f"章节数: {len(paper.sections)}")
51 |         print(f"图表数: {len(paper.figures)}")
52 |         print(f"表格数: {len(paper.tables)}")
53 |         print(f"公式数: {len(paper.formulas)}")
54 |         print(f"参考文献数: {len(paper.references)}")
55 |         
56 |         return output_path
57 |     
58 |     except Exception as e:
59 |         print(f"处理论文时出错: {e}")
60 |         import traceback
61 |         traceback.print_exc()
62 |         return None
63 | 
64 | # usage example
65 | if __name__ == "__main__":
66 |     # replace with an actual paper file path
67 |     sample_paper = "crazy_functions/doc_fns/read_fns/paper/2501.12599v1.pdf"
68 | 
69 |     # an output path can be passed explicitly, or the default can be used
70 |     # output_file = "/path/to/output/paper_structure.md"
71 |     # extract_and_save_as_markdown(sample_paper, output_file)
72 | 
73 |     # use the default output path (input file name with a .md extension)
74 |     extract_and_save_as_markdown(sample_paper)
75 |     
76 |     # # example: batch-process a folder of papers
77 |     # paper_dir = Path("/path/to/papers/folder")
78 |     # output_dir = Path("/path/to/output/folder")
79 |     #
80 |     # # make sure the output directory exists
81 |     # output_dir.mkdir(parents=True, exist_ok=True)
82 |     #
83 |     # # process every PDF file in the directory
84 |     # for paper_file in paper_dir.glob("*.pdf"):
85 |     #     output_file = output_dir / f"{paper_file.stem}.md"
86 |     #     extract_and_save_as_markdown(paper_file, output_file)


--------------------------------------------------------------------------------
/crazy_functions/game_fns/game_ascii_art.py:
--------------------------------------------------------------------------------
 1 | from toolbox import CatchException, update_ui, update_ui_latest_msg
 2 | from crazy_functions.multi_stage.multi_stage_utils import GptAcademicGameBaseState
 3 | from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 4 | from request_llms.bridge_all import predict_no_ui_long_connection
 5 | from crazy_functions.game_fns.game_utils import get_code_block, is_same_thing
 6 | import random
 7 | 
 8 | 
 9 | class MiniGame_ASCII_Art(GptAcademicGameBaseState):
10 |     def step(self, prompt, chatbot, history):
11 |         if self.step_cnt == 0:
12 |             chatbot.append(["我画你猜(动物)", "请稍等..."])
13 |         else:
14 |             if prompt.strip() == 'exit':
15 |                 self.delete_game = True
16 |                 yield from update_ui_latest_msg(lastmsg=f"谜底是{self.obj},游戏结束。", chatbot=chatbot, history=history, delay=0.)
17 |                 return
18 |             chatbot.append([prompt, ""])
19 |         yield from update_ui(chatbot=chatbot, history=history)
20 | 
21 |         if self.step_cnt == 0:
22 |             self.lock_plugin(chatbot)
23 |             self.cur_task = 'draw'
24 | 
25 |         if self.cur_task == 'draw':
26 |             avail_obj = ["狗","猫","鸟","鱼","老鼠","蛇"]
27 |             self.obj = random.choice(avail_obj)
28 |             inputs = "I want to play a game called Guess the ASCII art. You can draw the ASCII art and I will try to guess it. " + \
29 |                 f"This time you draw a {self.obj}. Note that you must not indicate what you have draw in the text, and you should only produce the ASCII art wrapped by ```. "
30 |             raw_res = predict_no_ui_long_connection(inputs=inputs, llm_kwargs=self.llm_kwargs, history=[], sys_prompt="")
31 |             self.cur_task = 'identify user guess'
32 |             res = get_code_block(raw_res)
33 |             history += ['', f'the answer is {self.obj}', inputs, res]
34 |             yield from update_ui_latest_msg(lastmsg=res, chatbot=chatbot, history=history, delay=0.)
35 | 
36 |         elif self.cur_task == 'identify user guess':
37 |             if is_same_thing(self.obj, prompt, self.llm_kwargs):
38 |                 self.delete_game = True
39 |                 yield from update_ui_latest_msg(lastmsg="你猜对了!", chatbot=chatbot, history=history, delay=0.)
40 |             else:
41 |                 self.cur_task = 'identify user guess'
42 |                 yield from update_ui_latest_msg(lastmsg="猜错了,再试试,输入“exit”获取答案。", chatbot=chatbot, history=history, delay=0.)


--------------------------------------------------------------------------------
/crazy_functions/game_fns/game_utils.py:
--------------------------------------------------------------------------------
 1 | 
 2 | from crazy_functions.json_fns.pydantic_io import GptJsonIO, JsonStringError
 3 | from request_llms.bridge_all import predict_no_ui_long_connection
 4 | def get_code_block(reply):
 5 |     import re
 6 |     pattern = r"```([\s\S]*?)```" # regex pattern to match code blocks
 7 |     matches = re.findall(pattern, reply) # find all code blocks in text
 8 |     if len(matches) == 1:
 9 |         return "```" + matches[0] + "```" #  code block
10 |     raise RuntimeError("GPT is not generating proper code.")
11 | 
12 | def is_same_thing(a, b, llm_kwargs):
13 |     from pydantic import BaseModel, Field
14 |     class IsSameThing(BaseModel):
15 |         is_same_thing: bool = Field(description="determine whether two objects are the same thing.", default=False)
16 | 
17 |     def run_gpt_fn(inputs, sys_prompt, history=[]):
18 |         return predict_no_ui_long_connection(
19 |             inputs=inputs, llm_kwargs=llm_kwargs,
20 |             history=history, sys_prompt=sys_prompt, observe_window=[]
21 |         )
22 | 
23 |     gpt_json_io = GptJsonIO(IsSameThing)
24 |     inputs_01 = "Identity whether the user input and the target is the same thing: \n target object: {a} \n user input object: {b} \n\n\n".format(a=a, b=b)
25 |     inputs_01 += "\n\n\n Note that the user may describe the target object with a different language, e.g. cat and 猫 are the same thing."
26 |     analyze_res_cot_01 = run_gpt_fn(inputs_01, "", [])
27 | 
28 |     inputs_02 = inputs_01 + gpt_json_io.format_instructions
29 |     analyze_res = run_gpt_fn(inputs_02, "", [inputs_01, analyze_res_cot_01])
30 | 
31 |     try:
32 |         res = gpt_json_io.generate_output_auto_repair(analyze_res, run_gpt_fn)
33 |         return res.is_same_thing
34 |     except JsonStringError as e:
35 |         return False
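
A quick sketch of the `get_code_block` contract (the reply text is illustrative): exactly one fenced block must be present, and it is returned with its fences intact; zero or multiple blocks raise RuntimeError.

```python
reply = "Sure, here it is:\n```\n /\\_/\\\n( o.o )\n```\nGood luck!"
art = get_code_block(reply)  # returns the single fenced block, fences included
```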


--------------------------------------------------------------------------------
/crazy_functions/gen_fns/gen_fns_shared.py:
--------------------------------------------------------------------------------
 1 | import time
 2 | import importlib
 3 | import multiprocessing
 4 | from toolbox import trimmed_format_exc, gen_time_str, get_log_folder
 5 | from toolbox import CatchException, update_ui, is_the_upload_folder
 6 | from toolbox import promote_file_to_downloadzone, update_ui_latest_msg
 7 | 
 8 | def get_class_name(class_string):
 9 |     import re
10 |     # Use regex to extract the class name
11 |     class_name = re.search(r'class (\w+)\(', class_string).group(1)
12 |     return class_name
13 | 
14 | def try_make_module(code, chatbot):
15 |     module_file = 'gpt_fn_' + gen_time_str().replace('-','_')
16 |     fn_path = f'{get_log_folder(plugin_name="gen_plugin_verify")}/{module_file}.py'
17 |     with open(fn_path, 'w', encoding='utf8') as f: f.write(code)
18 |     promote_file_to_downloadzone(fn_path, chatbot=chatbot)
19 |     class_name = get_class_name(code)
20 |     manager = multiprocessing.Manager()
21 |     return_dict = manager.dict()
22 |     p = multiprocessing.Process(target=is_function_successfully_generated, args=(fn_path, class_name, return_dict))
23 |     # the subprocess only gets 10 seconds to run
24 |     p.start(); p.join(timeout=10)
25 |     if p.is_alive(): p.terminate(); p.join()
26 |     p.close()
27 |     return return_dict.get('success', False), return_dict.get('traceback', 'timed out before the module could be imported')
28 | 
29 | # subprocess target: try importing the generated module and instantiating the class
30 | def is_function_successfully_generated(fn_path, class_name, return_dict):
31 |     return_dict['success'] = False
32 |     return_dict['traceback'] = ""
33 |     try:
34 |         # Create a spec for the module
35 |         module_spec = importlib.util.spec_from_file_location('example_module', fn_path)
36 |         # Load the module
37 |         example_module = importlib.util.module_from_spec(module_spec)
38 |         module_spec.loader.exec_module(example_module)
39 |         # Now you can use the module
40 |         some_class = getattr(example_module, class_name)
41 |         # Now you can create an instance of the class
42 |         instance = some_class()
43 |         return_dict['success'] = True
44 |         return
45 |     except Exception:
46 |         return_dict['traceback'] = trimmed_format_exc()
47 |         return
48 | 
49 | def subprocess_worker(code, file_path, return_dict):
50 |     return_dict['result'] = None
51 |     return_dict['success'] = False
52 |     return_dict['traceback'] = ""
53 |     try:
54 |         module_file = 'gpt_fn_' + gen_time_str().replace('-','_')
55 |         fn_path = f'{get_log_folder(plugin_name="gen_plugin_run")}/{module_file}.py'
56 |         with open(fn_path, 'w', encoding='utf8') as f: f.write(code)
57 |         class_name = get_class_name(code)
58 |         # Create a spec for the module
59 |         module_spec = importlib.util.spec_from_file_location('example_module', fn_path)
60 |         # Load the module
61 |         example_module = importlib.util.module_from_spec(module_spec)
62 |         module_spec.loader.exec_module(example_module)
63 |         # Now you can use the module
64 |         some_class = getattr(example_module, class_name)
65 |         # Now you can create an instance of the class
66 |         instance = some_class()
67 |         return_dict['result'] = instance.run(file_path)
68 |         return_dict['success'] = True
69 |     except Exception:
70 |         return_dict['traceback'] = trimmed_format_exc()
71 | 


--------------------------------------------------------------------------------
/crazy_functions/ipc_fns/mp.py:
--------------------------------------------------------------------------------
 1 | import platform
 2 | import pickle
 3 | import multiprocessing
 4 | 
 5 | def run_in_subprocess_wrapper_func(v_args):
 6 |     func, args, kwargs, return_dict, exception_dict = pickle.loads(v_args)
 7 |     import sys
 8 |     try:
 9 |         result = func(*args, **kwargs)
10 |         return_dict['result'] = result
11 |     except Exception as e:
12 |         exc_info = sys.exc_info()
13 |         exception_dict['exception'] = exc_info
14 | 
15 | def run_in_subprocess_with_timeout(func, timeout=60):
16 |     if platform.system() == 'Linux':
17 |         def wrapper(*args, **kwargs):
18 |             return_dict = multiprocessing.Manager().dict()
19 |             exception_dict = multiprocessing.Manager().dict()
20 |             v_args = pickle.dumps((func, args, kwargs, return_dict, exception_dict))
21 |             process = multiprocessing.Process(target=run_in_subprocess_wrapper_func, args=(v_args,))
22 |             process.start()
23 |             process.join(timeout)
24 |             if process.is_alive():
25 |                 process.terminate()
26 |                 raise TimeoutError(f'功能单元{str(func)}未能在规定时间内完成任务')
27 |             process.close()
28 |             if 'exception' in exception_dict:
29 |                 # ooops, the subprocess ran into an exception
30 |                 exc_info = exception_dict['exception']
31 |                 raise exc_info[1].with_traceback(exc_info[2])
32 |             if 'result' in return_dict.keys():
33 |                 # If the subprocess ran successfully, return the result
34 |                 return return_dict['result']
35 |         return wrapper
36 |     else:
37 |         return func
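
A minimal usage sketch (`slow_job` is a hypothetical stand-in); note that on non-Linux platforms the function is returned unwrapped, so the timeout only takes effect on Linux:

```python
from crazy_functions.ipc_fns.mp import run_in_subprocess_with_timeout

def slow_job(path):
    # stand-in for a third-party call that may hang
    return {"parsed": path}

safe_job = run_in_subprocess_with_timeout(slow_job, timeout=10)
result = safe_job("paper.pdf")  # raises TimeoutError if it exceeds 10 s (Linux only)
```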


--------------------------------------------------------------------------------
/crazy_functions/json_fns/select_tool.py:
--------------------------------------------------------------------------------
 1 | from crazy_functions.json_fns.pydantic_io import GptJsonIO, JsonStringError
 2 | 
 3 | def structure_output(txt, prompt, err_msg, run_gpt_fn, pydantic_cls):
 4 |     gpt_json_io = GptJsonIO(pydantic_cls)
 5 |     analyze_res = run_gpt_fn(
 6 |         txt, 
 7 |         sys_prompt=prompt + gpt_json_io.format_instructions
 8 |     )
 9 |     try:
10 |         instance = gpt_json_io.generate_output_auto_repair(analyze_res, run_gpt_fn)
11 |     except JsonStringError:
12 |         return None, err_msg
13 | 
14 |     err_msg = ""
15 |     return instance, err_msg
16 | 
17 | 
18 | def select_tool(prompt, run_gpt_fn, pydantic_cls):
19 |     pydantic_cls_instance, err_msg = structure_output(
20 |         txt=prompt,
21 |         prompt="根据提示, 分析应该调用哪个工具函数\n\n",
22 |         err_msg=f"不能理解该联系人",
23 |         run_gpt_fn=run_gpt_fn,
24 |         pydantic_cls=pydantic_cls
25 |     )
26 |     return pydantic_cls_instance, err_msg
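
A hypothetical sketch of driving `select_tool`: the pydantic class enumerates the candidate tools, and `run_gpt_fn` is any callable matching the `(txt, sys_prompt=...)` signature that `structure_output` expects.

```python
from pydantic import BaseModel, Field

class ToolChoice(BaseModel):
    tool_name: str = Field(description="one of 'translate_text', 'summarize_file'", default="translate_text")
    reason: str = Field(description="why this tool was chosen", default="")

# instance, err_msg = select_tool(prompt="帮我把这个文件翻译成英文",
#                                 run_gpt_fn=my_run_gpt_fn,  # hypothetical LLM callable
#                                 pydantic_cls=ToolChoice)
```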


--------------------------------------------------------------------------------
/crazy_functions/latex_fns/latex_pickle_io.py:
--------------------------------------------------------------------------------
 1 | import pickle
 2 | 
 3 | 
 4 | class SafeUnpickler(pickle.Unpickler):
 5 | 
 6 |     def get_safe_classes(self):
 7 |         from crazy_functions.latex_fns.latex_actions import LatexPaperFileGroup, LatexPaperSplit
 8 |         from crazy_functions.latex_fns.latex_toolbox import LinkedListNode
 9 |         from numpy.core.multiarray import scalar
10 |         from numpy import dtype
11 |         # define the classes that are allowed to be deserialized
12 |         safe_classes = {
13 |             # add further safe classes here
14 |             'LatexPaperFileGroup': LatexPaperFileGroup,
15 |             'LatexPaperSplit': LatexPaperSplit,
16 |             'LinkedListNode': LinkedListNode,
17 |             'scalar': scalar,
18 |             'dtype': dtype,
19 |         }
20 |         return safe_classes
21 | 
22 |     def find_class(self, module, name):
23 |         # only allow specific whitelisted classes to be deserialized
24 |         self.safe_classes = self.get_safe_classes()
25 |         match_class_name = None
26 |         for class_name in self.safe_classes.keys():
27 |             if (class_name in f'{module}.{name}'):
28 |                 match_class_name = class_name
29 |         if match_class_name is not None:
30 |             return self.safe_classes[match_class_name]
31 |         # attempting to load any unauthorized class raises an exception
32 |         raise pickle.UnpicklingError(f"Attempted to deserialize unauthorized class '{name}' from module '{module}'")
33 | 
34 | def objdump(obj, file="objdump.tmp"):
35 | 
36 |     with open(file, "wb+") as f:
37 |         pickle.dump(obj, f)
38 |     return
39 | 
40 | 
41 | def objload(file="objdump.tmp"):
42 |     import os
43 | 
44 |     if not os.path.exists(file):
45 |         return
46 |     with open(file, "rb") as f:
47 |         unpickler = SafeUnpickler(f)
48 |         return unpickler.load()
49 | 


--------------------------------------------------------------------------------
/crazy_functions/live_audio/audio_io.py:
--------------------------------------------------------------------------------
 1 | import numpy as np
 2 | from scipy import interpolate
 3 | 
 4 | def Singleton(cls):
 5 |     _instance = {}
 6 | 
 7 |     def _singleton(*args, **kargs):
 8 |         if cls not in _instance:
 9 |             _instance[cls] = cls(*args, **kargs)
10 |         return _instance[cls]
11 | 
12 |     return _singleton
13 | 
14 | 
15 | @Singleton
16 | class RealtimeAudioDistribution():
17 |     def __init__(self) -> None:
18 |         self.data = {}
19 |         self.max_len = 1024*1024
20 |         self.rate = 48000   # read-only: samples per second
21 | 
22 |     def clean_up(self):
23 |         self.data = {}
24 | 
25 |     def feed(self, uuid, audio):
26 |         self.rate, audio_ = audio
27 |         # print('feed', len(audio_), audio_[-25:])
28 |         if uuid not in self.data:
29 |             self.data[uuid] = audio_
30 |         else:
31 |             new_arr = np.concatenate((self.data[uuid], audio_))
32 |             if len(new_arr) > self.max_len: new_arr = new_arr[-self.max_len:]
33 |             self.data[uuid] = new_arr
34 | 
35 |     def read(self, uuid):
36 |         if uuid in self.data:
37 |             res = self.data.pop(uuid)
38 |             # print('\r read-', len(res), '-', max(res), end='', flush=True)
39 |         else:
40 |             res = None
41 |         return res
42 | 
43 | def change_sample_rate(audio, old_sr, new_sr):
44 |     duration = audio.shape[0] / old_sr
45 | 
46 |     time_old  = np.linspace(0, duration, audio.shape[0])
47 |     time_new  = np.linspace(0, duration, int(audio.shape[0] * new_sr / old_sr))
48 | 
49 |     interpolator = interpolate.interp1d(time_old, audio.T)
50 |     new_audio = interpolator(time_new).T
51 |     return new_audio.astype(np.int16)
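
A small sketch of `change_sample_rate` on synthetic input (a one-second 440 Hz tone; real buffers come from `RealtimeAudioDistribution.read`):

```python
import numpy as np

tone_48k = (np.sin(np.linspace(0, 2 * np.pi * 440, 48000)) * 32767).astype(np.int16)
tone_16k = change_sample_rate(tone_48k, old_sr=48000, new_sr=16000)
assert tone_16k.shape[0] == 16000  # one second of audio at the new rate
```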


--------------------------------------------------------------------------------
/crazy_functions/media_fns/get_media.py:
--------------------------------------------------------------------------------
 1 | from toolbox import update_ui, get_conf, promote_file_to_downloadzone, update_ui_latest_msg, generate_file_link
 2 | from shared_utils.docker_as_service_api import stream_daas
 3 | from shared_utils.docker_as_service_api import DockerServiceApiComModel
 4 | import random
 5 | 
 6 | def download_video(video_id, only_audio, user_name, chatbot, history):
 7 |     from toolbox import get_log_folder
 8 |     chatbot.append([None, "Processing..."])
 9 |     yield from update_ui(chatbot, history)
10 |     client_command = f'{video_id} --audio-only' if only_audio else video_id
11 |     server_urls = get_conf('DAAS_SERVER_URLS')
12 |     server_url = random.choice(server_urls)
13 |     docker_service_api_com_model = DockerServiceApiComModel(client_command=client_command)
14 |     save_file_dir = get_log_folder(user_name, plugin_name='media_downloader')
15 |     for output_manifest in stream_daas(docker_service_api_com_model, server_url, save_file_dir):
16 |         status_buf = ""
17 |         status_buf += "DaaS message: \n\n"
18 |         status_buf += output_manifest['server_message'].replace('\n', '<br/>')
19 |         status_buf += "\n\n"
20 |         status_buf += "DaaS standard error: \n\n"
21 |         status_buf += output_manifest['server_std_err'].replace('\n', '<br/>')
22 |         status_buf += "\n\n"
23 |         status_buf += "DaaS standard output: \n\n"
24 |         status_buf += output_manifest['server_std_out'].replace('\n', '<br/>')
25 |         status_buf += "\n\n"
26 |         status_buf += "DaaS file attach: \n\n"
27 |         status_buf += str(output_manifest['server_file_attach'])
28 |         yield from update_ui_latest_msg(status_buf, chatbot, history)
29 | 
30 |     return output_manifest['server_file_attach']
31 | 
32 | 
33 | def search_videos(keywords):
34 |     from toolbox import get_log_folder
35 |     client_command = keywords
36 |     server_urls = get_conf('DAAS_SERVER_URLS')
37 |     server_url = random.choice(server_urls)
38 |     server_url = server_url.replace('stream', 'search')
39 |     docker_service_api_com_model = DockerServiceApiComModel(client_command=client_command)
40 |     save_file_dir = get_log_folder("default_user", plugin_name='media_downloader')
41 |     for output_manifest in stream_daas(docker_service_api_com_model, server_url, save_file_dir):
42 |         return output_manifest['server_message']
43 | 
44 | 


--------------------------------------------------------------------------------
/crazy_functions/pdf_fns/parse_pdf_grobid.py:
--------------------------------------------------------------------------------
 1 | import os
 2 | from toolbox import CatchException, report_exception, get_log_folder, gen_time_str, check_packages
 3 | from toolbox import update_ui, promote_file_to_downloadzone, update_ui_latest_msg, disable_auto_promotion
 4 | from toolbox import write_history_to_file, get_conf, extract_archive
 5 | from crazy_functions.pdf_fns.parse_pdf import parse_pdf, translate_pdf
 6 | 
 7 | def 解析PDF_基于GROBID(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, grobid_url):
 8 |     import copy, json
 9 |     TOKEN_LIMIT_PER_FRAGMENT = 1024
10 |     generated_conclusion_files = []
11 |     generated_html_files = []
12 |     DST_LANG = "中文"
13 |     from crazy_functions.pdf_fns.report_gen_html import construct_html
14 |     for index, fp in enumerate(file_manifest):
15 |         chatbot.append(["当前进度:", f"正在连接GROBID服务,请稍候: {grobid_url}\n如果等待时间过长,请修改config中的GROBID_URL,可修改成本地GROBID服务。"]); yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
16 |         article_dict = parse_pdf(fp, grobid_url)
17 |         if article_dict is None: raise RuntimeError("解析PDF失败,请检查PDF是否损坏。")
18 |         grobid_json_res = os.path.join(get_log_folder(), gen_time_str() + "grobid.json")
19 |         with open(grobid_json_res, 'w+', encoding='utf8') as f:
20 |             f.write(json.dumps(article_dict, indent=4, ensure_ascii=False))
21 |         promote_file_to_downloadzone(grobid_json_res, chatbot=chatbot)
22 |         yield from translate_pdf(article_dict, llm_kwargs, chatbot, fp, generated_conclusion_files, TOKEN_LIMIT_PER_FRAGMENT, DST_LANG, plugin_kwargs=plugin_kwargs)
23 |     chatbot.append(("给出输出文件清单", str(generated_conclusion_files + generated_html_files)))
24 |     yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
25 | 
26 | 
27 | 


--------------------------------------------------------------------------------
/crazy_functions/pdf_fns/report_gen_html.py:
--------------------------------------------------------------------------------
 1 | from toolbox import update_ui, get_conf, trimmed_format_exc, get_log_folder
 2 | import os
 3 | 
 4 | 
 5 | 
 6 | 
 7 | class construct_html():
 8 |     def __init__(self) -> None:
 9 |         self.html_string = ""
10 | 
11 |     def add_row(self, a, b):
12 |         from toolbox import markdown_convertion
13 |         template = """
14 |             {
15 |                 primary_col: {
16 |                     header: String.raw`__PRIMARY_HEADER__`,
17 |                     msg: String.raw`__PRIMARY_MSG__`,
18 |                 },
19 |                 secondary_rol: {
20 |                     header: String.raw`__SECONDARY_HEADER__`,
21 |                     msg: String.raw`__SECONDARY_MSG__`,
22 |                 }
23 |             },
24 |         """
25 |         def std(str):
26 |             str = str.replace(r'`',r'&#96;')
27 |             if str.endswith("\\"): str += ' '
28 |             if str.endswith("}"): str += ' '
29 |             if str.endswith("
quot;): str += ' '
30 |             return str
31 | 
32 |         template_ = template
33 |         a_lines = a.split('\n')
34 |         b_lines = b.split('\n')
35 | 
36 |         if len(a_lines) == 1 or len(a_lines[0]) > 50:
37 |             template_ = template_.replace("__PRIMARY_HEADER__", std(a[:20]))
38 |             template_ = template_.replace("__PRIMARY_MSG__", std(markdown_convertion(a)))
39 |         else:
40 |             template_ = template_.replace("__PRIMARY_HEADER__", std(a_lines[0]))
41 |             template_ = template_.replace("__PRIMARY_MSG__", std(markdown_convertion('\n'.join(a_lines[1:]))))
42 | 
43 |         if len(b_lines) == 1 or len(b_lines[0]) > 50:
44 |             template_ = template_.replace("__SECONDARY_HEADER__", std(b[:20]))
45 |             template_ = template_.replace("__SECONDARY_MSG__", std(markdown_convertion(b)))
46 |         else:
47 |             template_ = template_.replace("__SECONDARY_HEADER__", std(b_lines[0]))
48 |             template_ = template_.replace("__SECONDARY_MSG__", std(markdown_convertion('\n'.join(b_lines[1:]))))
49 |         self.html_string += template_
50 | 
51 |     def save_file(self, file_name):
52 |         from toolbox import get_log_folder
53 |         with open('crazy_functions/pdf_fns/report_template.html', 'r', encoding='utf8') as f:
54 |             html_template = f.read()
55 |         html_template = html_template.replace("__TF_ARR__", self.html_string)
56 |         with open(os.path.join(get_log_folder(), file_name), 'w', encoding='utf8') as f:
57 |             f.write(html_template.encode('utf-8', 'ignore').decode())
58 |         return os.path.join(get_log_folder(), file_name)
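
A usage sketch (file name illustrative): each `add_row` call pairs an original block with its counterpart, and `save_file` renders them into the report template under the log folder.

```python
html = construct_html()
html.add_row("Section 1: Introduction\nOriginal English text ...", "第一节:引言\n中文译文 ……")
report_path = html.save_file("translate_report.html")
```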
59 | 


--------------------------------------------------------------------------------
/crazy_functions/pdf_fns/report_template_v2.html:
--------------------------------------------------------------------------------
 1 | <!DOCTYPE html>
 2 | <html xmlns="http://www.w3.org/1999/xhtml">
 3 | 
 4 | <head>
 5 |     <meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
 6 |     <title>GPT-Academic 翻译报告书</title>
 7 |     <style>
 8 |         .centered-a {
 9 |             color: red;
10 |             text-align: center;
11 |             margin-bottom: 2%;
12 |             font-size: 1.5em;
13 |         }
14 |         .centered-b {
15 |             color: red;
16 |             text-align: center;
17 |             margin-top: 10%;
18 |             margin-bottom: 20%;
19 |             font-size: 1.5em;
20 |         }
21 |         .centered-c {
22 |             color: rgba(255, 0, 0, 0);
23 |             text-align: center;
24 |             margin-top: 2%;
25 |             margin-bottom: 20%;
26 |             font-size: 7em;
27 |         }
28 |     </style>
29 | <script>
30 |         // Configure MathJax settings
31 |         MathJax = {
32 |             tex: {
33 |                 inlineMath: [
34 |                     ['$', '$'],
35 |                     ['\\(', '\\)']
36 |                 ]
37 |             }
38 |         }
39 |         addEventListener('zero-md-rendered', () => {MathJax.typeset(); console.log('MathJax typeset!');})
40 |     </script>
41 |     <!-- Load MathJax library -->
42 |     <script src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-chtml.js"></script>
43 |     <script
44 |         type="module"
45 |         src="https://cdn.jsdelivr.net/gh/zerodevx/zero-md@2/dist/zero-md.min.js"
46 |     ></script>
47 | 
48 | </head>
49 | 
50 | <body>
51 |     <div class="test_temp1" style="width:10%; height: 500px; float:left;">
52 | 
53 |     </div>
54 |     <div class="test_temp2" style="width:80%; height: 500px; float:left;">
55 |         <!-- Simply set the `src` attribute to your MD file and win -->
56 |         <div class="centered-a">
57 |             请按Ctrl+S保存此页面,否则该页面可能在几分钟后失效。
58 |         </div>
59 |         <zero-md src="translated_markdown.md" no-shadow>
60 |         </zero-md>
61 |         <div class="centered-b">
62 |             本报告由GPT-Academic开源项目生成,地址:https://github.com/binary-husky/gpt_academic。
63 |         </div>
64 |         <div class="centered-c">
65 |             本报告由GPT-Academic开源项目生成,地址:https://github.com/binary-husky/gpt_academic。
66 |         </div>
67 |     </div>
68 |     <div class="test_temp3" style="width:10%; height: 500px; float:left;">
69 |     </div>
70 | 
71 |     </body>
72 | 
73 | </html>


--------------------------------------------------------------------------------
/crazy_functions/plugin_template/plugin_class_template.py:
--------------------------------------------------------------------------------
 1 | import os, json, base64
 2 | from pydantic import BaseModel, Field
 3 | from textwrap import dedent
 4 | from typing import List
 5 | 
 6 | class ArgProperty(BaseModel): # PLUGIN_ARG_MENU
 7 |     title: str = Field(description="The title", default="")
 8 |     description: str = Field(description="The description", default="")
 9 |     default_value: str = Field(description="The default value", default="")
10 |     type: str = Field(description="The type", default="")   # currently we support ['string', 'dropdown']
11 |     options: List[str] = Field(default=[], description="List of options available for the argument") # only used when type is 'dropdown'
12 | 
13 | class GptAcademicPluginTemplate():
14 |     def __init__(self):
15 |         # please note that `execute` method may run in different threads,
16 |         # thus you should not store any state in the plugin instance,
17 |         # which may be accessed by multiple threads
18 |         pass
19 | 
20 | 
21 |     def define_arg_selection_menu(self):
22 |         """
23 |         An example as below:
24 |             ```
25 |             def define_arg_selection_menu(self):
26 |                 gui_definition = {
27 |                     "main_input":
28 |                         ArgProperty(title="main input", description="description", default_value="default_value", type="string").model_dump_json(),
29 |                     "advanced_arg":
30 |                         ArgProperty(title="advanced arguments", description="description", default_value="default_value", type="string").model_dump_json(),
31 |                     "additional_arg_01":
32 |                         ArgProperty(title="additional", description="description", default_value="default_value", type="string").model_dump_json(),
33 |                 }
34 |                 return gui_definition
35 |             ```
36 |         """
37 |         raise NotImplementedError("You need to implement this method in your plugin class")
38 | 
39 | 
40 |     def get_js_code_for_generating_menu(self, btnName):
41 |         define_arg_selection = self.define_arg_selection_menu()
42 | 
43 |         if len(define_arg_selection.keys()) > 8:
44 |             raise ValueError("You can only have up to 8 arguments in the define_arg_selection")
45 |         # if "main_input" not in define_arg_selection:
46 |         #     raise ValueError("You must have a 'main_input' in the define_arg_selection")
47 | 
48 |         DEFINE_ARG_INPUT_INTERFACE = json.dumps(define_arg_selection)
49 |         return base64.b64encode(DEFINE_ARG_INPUT_INTERFACE.encode('utf-8')).decode('utf-8')
50 | 
51 |     def execute(self, txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
52 |         raise NotImplementedError("You need to implement this method in your plugin class")
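
A minimal, hypothetical plugin built on this template, with one string argument and one dropdown; the `execute` body here is only a stub (real plugins are generators that `yield from update_ui(...)`):

```python
class EchoPlugin(GptAcademicPluginTemplate):
    def define_arg_selection_menu(self):
        return {
            "main_input": ArgProperty(title="text", description="text to echo",
                                      default_value="", type="string").model_dump_json(),
            "mode": ArgProperty(title="mode", description="echo style",
                                default_value="plain", type="dropdown",
                                options=["plain", "upper"]).model_dump_json(),
        }

    def execute(self, txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
        reply = txt.upper() if plugin_kwargs.get("mode") == "upper" else txt
        chatbot.append([txt, reply])
```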


--------------------------------------------------------------------------------
/crazy_functions/prompts/internet.py:
--------------------------------------------------------------------------------
 1 | SearchOptimizerPrompt="""作为一个网页搜索助手,你的任务是结合历史记录,从不同角度,为“原问题”生成多个不同版本的“检索词”,从而提高网页检索的精度。生成的检索词要求指向对象清晰明确,并与“原问题”语言相同。例如:
 2 | 历史记录: 
 3 | "
 4 | Q: 对话背景。
 5 | A: 当前对话是关于 Nginx 的介绍和在Ubuntu上的使用等。
 6 | "
 7 | 原问题: 怎么下载
 8 | 检索词: ["Nginx 下载","Ubuntu Nginx","Ubuntu安装Nginx"]
 9 | ----------------
10 | 历史记录: 
11 | "
12 | Q: 对话背景。
13 | A: 当前对话是关于 Nginx 的介绍和使用等。
14 | Q: 报错 "no connection"
15 | A: 报错"no connection"可能是因为……
16 | "
17 | 原问题: 怎么解决
18 | 检索词: ["Nginx报错"no connection" 解决","Nginx'no connection'报错 原因","Nginx提示'no connection'"]
19 | ----------------
20 | 历史记录:
21 | "
22 | 
23 | "
24 | 原问题: 你知道 Python 么?
25 | 检索词: ["Python","Python 使用教程。","Python 特点和优势"]
26 | ----------------
27 | 历史记录:
28 | "
29 | Q: 列出Java的三种特点?
30 | A: 1. Java 是一种编译型语言。
31 |    2. Java 是一种面向对象的编程语言。
32 |    3. Java 是一种跨平台的编程语言。
33 | "
34 | 原问题: 介绍下第2点。
35 | 检索词: ["Java 面向对象特点","Java 面向对象编程优势。","Java 面向对象编程"]
36 | ----------------
37 | 现在有历史记录:
38 | "
39 | {history}
40 | "
41 | 以及原问题: {query}
42 | 直接给出最多{num}个检索词,必须以json形式给出,不得有多余字符:
43 | """
44 | 
45 | SearchAcademicOptimizerPrompt="""作为一个学术论文搜索助手,你的任务是结合历史记录,从不同角度,为“原问题”生成多个不同版本的“检索词”,从而提高学术论文检索的精度。生成的检索词要求指向对象清晰明确,并与“原问题”语言相同。例如:
46 | 历史记录: 
47 | "
48 | Q: 对话背景。
49 | A: 当前对话是关于深度学习的介绍和在图像识别中的应用等。
50 | "
51 | 原问题: 怎么下载相关论文
52 | 检索词: ["深度学习 图像识别 论文下载","图像识别 深度学习 研究论文","深度学习 图像识别 论文资源","Deep Learning Image Recognition Paper Download","Image Recognition Deep Learning Research Paper"]
53 | ----------------
54 | 历史记录: 
55 | "
56 | Q: 对话背景。
57 | A: 当前对话是关于深度学习的介绍和应用等。
58 | Q: 报错 "模型不收敛"
59 | A: 报错"模型不收敛"可能是因为……
60 | "
61 | 原问题: 怎么解决
62 | 检索词: ["深度学习 模型不收敛 解决方案 论文","深度学习 模型不收敛 原因 研究","深度学习 模型不收敛 论文","Deep Learning Model Convergence Issue Solution Paper","Deep Learning Model Convergence Problem Research"]
63 | ----------------
64 | 历史记录:
65 | "
66 | 
67 | "
68 | 原问题: 你知道 GAN 么?
69 | 检索词: ["生成对抗网络 论文","GAN 使用教程 论文","GAN 特点和优势 研究","Generative Adversarial Network Paper","GAN Usage Tutorial Paper"]
70 | ----------------
71 | 历史记录:
72 | "
73 | Q: 列出机器学习的三种应用?
74 | A: 1. 机器学习在图像识别中的应用。
75 |    2. 机器学习在自然语言处理中的应用。
76 |    3. 机器学习在推荐系统中的应用。
77 | "
78 | 原问题: 介绍下第2点。
79 | 检索词: ["机器学习 自然语言处理 应用 论文","机器学习 自然语言处理 研究","机器学习 NLP 应用 论文","Machine Learning Natural Language Processing Application Paper","Machine Learning NLP Research"]
80 | ----------------
81 | 现在有历史记录:
82 | "
83 | {history}
84 | "
85 | 以及原问题: {query}
86 | 直接给出最多{num}个检索词,必须以json形式给出,不得有多余字符:
87 | """


--------------------------------------------------------------------------------
/crazy_functions/rag_fns/rag_file_support.py:
--------------------------------------------------------------------------------
 1 | import os
 2 | from llama_index.core import SimpleDirectoryReader
 3 | 
 4 | supports_format = ['.csv', '.docx', '.epub', '.ipynb',  '.mbox', '.md', '.pdf',  '.txt', '.ppt',
 5 |                    '.pptm', '.pptx']
 6 | 
 7 | 
 8 | # revised extract_text: combines SimpleDirectoryReader with custom parsing logic
 9 | def extract_text(file_path):
10 |     _, ext = os.path.splitext(file_path.lower())
11 | 
12 |     # let SimpleDirectoryReader handle the formats it supports
13 |     if ext in supports_format:
14 |         try:
15 |             reader = SimpleDirectoryReader(input_files=[file_path])
16 |             documents = reader.load_data()
17 |             if len(documents) > 0:
18 |                 return documents[0].text
19 |         except Exception:
20 |             pass
21 | 
22 |     return None
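
A usage sketch (the path is illustrative): `extract_text` returns plain text on success and `None` for unsupported extensions or parse failures, so callers should handle both outcomes.

```python
text = extract_text("./private_upload/report.docx")
if text is None:
    print("unsupported or unreadable file")
```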
23 | 


--------------------------------------------------------------------------------
/crazy_functions/rag_fns/vector_store_index.py:
--------------------------------------------------------------------------------
 1 | from llama_index.core import VectorStoreIndex
 2 | from typing import Any,  List, Optional
 3 | 
 4 | from llama_index.core.callbacks.base import CallbackManager
 5 | from llama_index.core.schema import TransformComponent
 6 | from llama_index.core.service_context import ServiceContext
 7 | from llama_index.core.settings import (
 8 |     Settings,
 9 |     callback_manager_from_settings_or_context,
10 |     transformations_from_settings_or_context,
11 | )
12 | from llama_index.core.storage.storage_context import StorageContext
13 | 
14 | 
15 | class GptacVectorStoreIndex(VectorStoreIndex):
16 |     
17 |     @classmethod
18 |     def default_vector_store(
19 |         cls,
20 |         storage_context: Optional[StorageContext] = None,
21 |         show_progress: bool = False,
22 |         callback_manager: Optional[CallbackManager] = None,
23 |         transformations: Optional[List[TransformComponent]] = None,
24 |         # deprecated
25 |         service_context: Optional[ServiceContext] = None,
26 |         embed_model = None,
27 |         **kwargs: Any,
28 |     ):
29 |         """Create index from documents.
30 | 
31 |         Args:
32 |             documents (Optional[Sequence[BaseDocument]]): List of documents to
33 |                 build the index from.
34 | 
35 |         """
36 |         storage_context = storage_context or StorageContext.from_defaults()
37 |         docstore = storage_context.docstore
38 |         callback_manager = (
39 |             callback_manager
40 |             or callback_manager_from_settings_or_context(Settings, service_context)
41 |         )
42 |         transformations = transformations or transformations_from_settings_or_context(
43 |             Settings, service_context
44 |         )
45 | 
46 |         with callback_manager.as_trace("index_construction"):
47 | 
48 |             return cls(
49 |                 nodes=[],
50 |                 storage_context=storage_context,
51 |                 callback_manager=callback_manager,
52 |                 show_progress=show_progress,
53 |                 transformations=transformations,
54 |                 service_context=service_context,
55 |                 embed_model=embed_model,
56 |                 **kwargs,
57 |             )
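
A sketch of how this classmethod might be used (`my_embed_model` and `documents` are illustrative): the returned index starts empty and content is inserted afterwards.

```python
index = GptacVectorStoreIndex.default_vector_store(embed_model=my_embed_model)
for doc in documents:  # documents: a list of llama_index Document objects
    index.insert(doc)  # llama_index runs the configured transformations on insert
```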
58 | 
59 | 


--------------------------------------------------------------------------------
/crazy_functions/vector_fns/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/binary-husky/gpt_academic/a7a56b5058fc8e69641e113f615aed8ab3a59a64/crazy_functions/vector_fns/__init__.py


--------------------------------------------------------------------------------
/crazy_functions/vector_fns/general_file_loader.py:
--------------------------------------------------------------------------------
 1 | # From project chatglm-langchain
 2 | 
 3 | 
 4 | from langchain.document_loaders import UnstructuredFileLoader
 5 | from langchain.text_splitter import CharacterTextSplitter
 6 | import re
 7 | from typing import List
 8 | 
 9 | class ChineseTextSplitter(CharacterTextSplitter):
10 |     def __init__(self, pdf: bool = False, sentence_size: int = None, **kwargs):
11 |         super().__init__(**kwargs)
12 |         self.pdf = pdf
13 |         self.sentence_size = sentence_size
14 | 
15 |     def split_text1(self, text: str) -> List[str]:
16 |         if self.pdf:
17 |             text = re.sub(r"\n{3,}", "\n", text)
18 |             text = re.sub(r'\s', ' ', text)
19 |             text = text.replace("\n\n", "")
20 |         sent_sep_pattern = re.compile('([﹒﹔﹖﹗.。!?]["’”」』]{0,2}|(?=["‘“「『]{1,2}|$))')  # del :;
21 |         sent_list = []
22 |         for ele in sent_sep_pattern.split(text):
23 |             if sent_sep_pattern.match(ele) and sent_list:
24 |                 sent_list[-1] += ele
25 |             elif ele:
26 |                 sent_list.append(ele)
27 |         return sent_list
28 | 
29 |     def split_text(self, text: str) -> List[str]:   # this logic still needs further refinement
30 |         if self.pdf:
31 |             text = re.sub(r"\n{3,}", r"\n", text)
32 |             text = re.sub(r'\s', " ", text)
33 |             text = re.sub("\n\n", "", text)
34 | 
35 |         text = re.sub(r'([;;.!?。!?\?])([^”’])', r"\1\n\2", text)  # single-character sentence terminators
36 |         text = re.sub(r'(\.{6})([^"’”」』])', r"\1\n\2", text)  # English ellipsis
37 |         text = re.sub(r'(\…{2})([^"’”」』])', r"\1\n\2", text)  # Chinese ellipsis
38 |         text = re.sub(r'([;;!?。!?\?]["’”」』]{0,2})([^;;!?,。!?\?])', r'\1\n\2', text)
39 |         # a closing quote after a terminator ends the sentence, so put the \n after the quote (the rules above carefully keep the quotes)
40 |         text = text.rstrip()  # drop any trailing \n at the end of the paragraph
41 |         # many rule sets also split on the semicolon; it is ignored here, as are dashes and English double quotes; adjust if needed
42 |         ls = [i for i in text.split("\n") if i]
43 |         for ele in ls:
44 |             if len(ele) > self.sentence_size:
45 |                 ele1 = re.sub(r'([,,.]["’”」』]{0,2})([^,,.])', r'\1\n\2', ele)
46 |                 ele1_ls = ele1.split("\n")
47 |                 for ele_ele1 in ele1_ls:
48 |                     if len(ele_ele1) > self.sentence_size:
49 |                         ele_ele2 = re.sub(r'([\n]{1,}| {2,}["’”」』]{0,2})([^\s])', r'\1\n\2', ele_ele1)
50 |                         ele2_ls = ele_ele2.split("\n")
51 |                         for ele_ele2 in ele2_ls:
52 |                             if len(ele_ele2) > self.sentence_size:
53 |                                 ele_ele3 = re.sub('( ["’”」』]{0,2})([^ ])', r'\1\n\2', ele_ele2)
54 |                                 ele2_id = ele2_ls.index(ele_ele2)
55 |                                 ele2_ls = ele2_ls[:ele2_id] + [i for i in ele_ele3.split("\n") if i] + ele2_ls[
56 |                                                                                                        ele2_id + 1:]
57 |                         ele_id = ele1_ls.index(ele_ele1)
58 |                         ele1_ls = ele1_ls[:ele_id] + [i for i in ele2_ls if i] + ele1_ls[ele_id + 1:]
59 | 
60 |                 idx = ls.index(ele)
61 |                 ls = ls[:idx] + [i for i in ele1_ls if i] + ls[idx + 1:]
62 |         return ls
63 | 
64 | def load_file(filepath, sentence_size):
65 |     loader = UnstructuredFileLoader(filepath, mode="elements")
66 |     textsplitter = ChineseTextSplitter(pdf=False, sentence_size=sentence_size)
67 |     docs = loader.load_and_split(text_splitter=textsplitter)
68 |     # write_check_file(filepath, docs)
69 |     return docs
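
A small standalone sketch of the splitter (output abbreviated; the exact chunking follows the regex rules above):

```python
splitter = ChineseTextSplitter(pdf=False, sentence_size=50)
chunks = splitter.split_text("第一句话。第二句话!Is this the third sentence? 是的。")
# -> roughly one chunk per sentence, e.g. ['第一句话。', '第二句话!', ...]
```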
70 | 
71 | 


--------------------------------------------------------------------------------
/crazy_functions/vt_fns/vt_state.py:
--------------------------------------------------------------------------------
 1 | import pickle
 2 | 
 3 | class VoidTerminalState():
 4 |     def __init__(self):
 5 |         self.reset_state()
 6 | 
 7 |     def reset_state(self):
 8 |         self.has_provided_explanation = False
 9 | 
10 |     def lock_plugin(self, chatbot):
11 |         chatbot._cookies['lock_plugin'] = 'crazy_functions.虚空终端->虚空终端'
12 |         chatbot._cookies['plugin_state'] = pickle.dumps(self)
13 | 
14 |     def unlock_plugin(self, chatbot):
15 |         self.reset_state()
16 |         chatbot._cookies['lock_plugin'] = None
17 |         chatbot._cookies['plugin_state'] = pickle.dumps(self)
18 | 
19 |     def set_state(self, chatbot, key, value):
20 |         setattr(self, key, value)
21 |         chatbot._cookies['plugin_state'] = pickle.dumps(self)
22 | 
23 |     @staticmethod
24 |     def get_state(chatbot):
25 |         state = chatbot._cookies.get('plugin_state', None)
26 |         if state is not None:   state = pickle.loads(state)
27 |         else:                   state = VoidTerminalState()
28 |         state.chatbot = chatbot
29 |         return state
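
A sketch of the cookie round trip (`chatbot` is the gradio chatbot handle carrying the `_cookies` dict, as elsewhere in this repo):

```python
state = VoidTerminalState.get_state(chatbot)          # load or create the state
state.set_state(chatbot, key='has_provided_explanation', value=True)
state.unlock_plugin(chatbot)                          # reset and release the lock
```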


--------------------------------------------------------------------------------
/crazy_functions/互动小游戏.py:
--------------------------------------------------------------------------------
 1 | from toolbox import CatchException, update_ui, update_ui_latest_msg
 2 | from crazy_functions.multi_stage.multi_stage_utils import GptAcademicGameBaseState
 3 | from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 4 | from request_llms.bridge_all import predict_no_ui_long_connection
 5 | from crazy_functions.game_fns.game_utils import get_code_block, is_same_thing
 6 | 
 7 | @CatchException
 8 | def 随机小游戏(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
 9 |     from crazy_functions.game_fns.game_interactive_story import MiniGame_ResumeStory
10 |     # clear the history
11 |     history = []
12 |     # pick the game
13 |     cls = MiniGame_ResumeStory
14 |     # resume the game instance if one was initialized earlier; otherwise start a new one
15 |     state = cls.sync_state(chatbot,
16 |                            llm_kwargs,
17 |                            cls,
18 |                            plugin_name='MiniGame_ResumeStory',
19 |                            callback_fn='crazy_functions.互动小游戏->随机小游戏',
20 |                            lock_plugin=True
21 |                            )
22 |     yield from state.continue_game(prompt, chatbot, history)
23 | 
24 | 
25 | @CatchException
26 | def 随机小游戏1(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
27 |     from crazy_functions.game_fns.game_ascii_art import MiniGame_ASCII_Art
28 |     # clear the history
29 |     history = []
30 |     # pick the game
31 |     cls = MiniGame_ASCII_Art
32 |     # resume the game instance if one was initialized earlier; otherwise start a new one
33 |     state = cls.sync_state(chatbot,
34 |                            llm_kwargs,
35 |                            cls,
36 |                            plugin_name='MiniGame_ASCII_Art',
37 |                            callback_fn='crazy_functions.互动小游戏->随机小游戏1',
38 |                            lock_plugin=True
39 |                            )
40 |     yield from state.continue_game(prompt, chatbot, history)
41 | 


--------------------------------------------------------------------------------
/crazy_functions/交互功能函数模板.py:
--------------------------------------------------------------------------------
 1 | from toolbox import CatchException, update_ui
 2 | from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 3 | 
 4 | @CatchException
 5 | def 交互功能模板函数(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
 6 |     """
 7 |     txt             输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
 8 |     llm_kwargs      gpt模型参数, 如温度和top_p等, 一般原样传递下去就行
 9 |     plugin_kwargs   插件模型的参数, 如温度和top_p等, 一般原样传递下去就行
10 |     chatbot         聊天显示框的句柄,用于显示给用户
11 |     history         聊天历史,前情提要
12 |     system_prompt   给gpt的静默提醒
13 |     user_request    当前用户的请求信息(IP地址等)
14 |     """
15 |     history = []    # clear the history to avoid input overflow
16 |     chatbot.append(("这是什么功能?", "交互功能函数模板。在执行完成之后, 可以将自身的状态存储到cookie中, 等待用户的再次调用。"))
17 |     yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
18 | 
19 |     state = chatbot._cookies.get('plugin_state_0001', None) # read the plugin state
20 | 
21 |     if state is None:
22 |         chatbot._cookies['lock_plugin'] = 'crazy_functions.交互功能函数模板->交互功能模板函数'      # lock the plugin: the next user submission is routed straight back to this function
23 |         chatbot._cookies['plugin_state_0001'] = 'wait_user_keyword'                              # set the plugin state
24 | 
25 |         chatbot.append(("第一次调用:", "请输入关键词, 我将为您查找相关壁纸, 建议使用英文单词, 插件锁定中,请直接提交即可。"))
26 |         yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
27 |         return
28 | 
29 |     if state == 'wait_user_keyword':
30 |         chatbot._cookies['lock_plugin'] = None          # release the plugin lock, otherwise forgetting it would deadlock the plugin
31 |         chatbot._cookies['plugin_state_0001'] = None    # clear the plugin state for the same reason
32 | 
33 |         # lock released; handle the keyword
34 |         chatbot.append((f"获取关键词:{txt}", ""))
35 |         yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
36 |         page_return = get_image_page_by_keyword(txt)
37 |         inputs=inputs_show_user=f"Extract all image urls in this html page, pick the first 5 images and show them with markdown format: \n\n {page_return}"
38 |         gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
39 |             inputs=inputs, inputs_show_user=inputs_show_user,
40 |             llm_kwargs=llm_kwargs, chatbot=chatbot, history=[],
41 |             sys_prompt="When you want to show an image, use markdown format. e.g. ![image_description](image_url). If there are no image url provided, answer 'no image url provided'"
42 |         )
43 |         chatbot[-1] = [chatbot[-1][0], gpt_say]
44 |         yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
45 |         return
46 | 
47 | 
48 | 
49 | # ---------------------------------------------------------------------------------
50 | 
51 | def get_image_page_by_keyword(keyword):
52 |     import requests
53 |     from bs4 import BeautifulSoup
54 |     response = requests.get(f'https://wallhaven.cc/search?q={keyword}', timeout=2)
55 |     res = "image urls: \n"
56 |     for image_element in BeautifulSoup(response.content, 'html.parser').findAll("img"):
57 |         try:
58 |             res += image_element["data-src"]
59 |             res += "\n"
60 |         except Exception:
61 |             pass
62 |     return res
63 | 


--------------------------------------------------------------------------------
/crazy_functions/命令行助手.py:
--------------------------------------------------------------------------------
 1 | from toolbox import CatchException, update_ui, gen_time_str
 2 | from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 3 | from crazy_functions.crazy_utils import input_clipping
 4 | import copy, json
 5 | 
 6 | @CatchException
 7 | def 命令行助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
 8 |     """
 9 |     txt             输入栏用户输入的文本, 例如需要翻译的一段话, 再例如一个包含了待处理文件的路径
10 |     llm_kwargs      gpt模型参数, 如温度和top_p等, 一般原样传递下去就行
11 |     plugin_kwargs   插件模型的参数, 暂时没有用武之地
12 |     chatbot         聊天显示框的句柄, 用于显示给用户
13 |     history         聊天历史, 前情提要
14 |     system_prompt   给gpt的静默提醒
15 |     user_request    当前用户的请求信息(IP地址等)
16 |     """
17 |     # clear the history to avoid input overflow
18 |     history = []
19 | 
20 |     # the request
21 |     i_say = "请写bash命令实现以下功能:" + txt
22 |     # go
23 |     gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
24 |         inputs=i_say, inputs_show_user=txt,
25 |         llm_kwargs=llm_kwargs, chatbot=chatbot, history=[],
26 |         sys_prompt="你是一个Linux大师级用户。注意,当我要求你写bash命令时,尽可能地仅用一行命令解决我的要求。"
27 |     )
28 |     yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
29 | 
30 | 
31 | 
32 | 


--------------------------------------------------------------------------------
/crazy_functions/生成函数注释.py:
--------------------------------------------------------------------------------
 1 | from loguru import logger
 2 | from toolbox import update_ui
 3 | from toolbox import CatchException, report_exception
 4 | from toolbox import write_history_to_file, promote_file_to_downloadzone
 5 | from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 6 | 
 7 | def 生成函数注释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
 8 |     import time, os
 9 |     logger.info(f'begin analysis on: {file_manifest}')
10 |     for index, fp in enumerate(file_manifest):
11 |         with open(fp, 'r', encoding='utf-8', errors='replace') as f:
12 |             file_content = f.read()
13 | 
14 |         i_say = f'请对下面的程序文件做一个概述,并对文件中的所有函数生成注释,使用markdown表格输出结果,文件名是{os.path.relpath(fp, project_folder)},文件内容是 ```{file_content}```'
15 |         i_say_show_user = f'[{index+1}/{len(file_manifest)}] 请对下面的程序文件做一个概述,并对文件中的所有函数生成注释: {os.path.abspath(fp)}'
16 |         chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
17 |         yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
18 | 
19 |         msg = '正常'
20 |         # ** gpt request **
21 |         gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
22 |             i_say, i_say_show_user, llm_kwargs, chatbot, history=[], sys_prompt=system_prompt)   # with timeout countdown
23 | 
24 |         chatbot[-1] = (i_say_show_user, gpt_say)
25 |         history.append(i_say_show_user); history.append(gpt_say)
26 |         yield from update_ui(chatbot=chatbot, history=history, msg=msg) # refresh the UI
27 |         time.sleep(2)
28 | 
29 |     res = write_history_to_file(history)
30 |     promote_file_to_downloadzone(res, chatbot=chatbot)
31 |     chatbot.append(("完成了吗?", res))
32 |     yield from update_ui(chatbot=chatbot, history=history, msg=msg) # refresh the UI
33 | 
34 | 
35 | 
36 | @CatchException
37 | def 批量生成函数注释(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
38 |     history = []    # clear the history to avoid input overflow
39 |     import glob, os
40 |     if os.path.exists(txt):
41 |         project_folder = txt
42 |     else:
43 |         if txt == "": txt = '空空如也的输入栏'
44 |         report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
45 |         yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
46 |         return
47 |     file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.py', recursive=True)] + \
48 |                     [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)]
49 | 
50 |     if len(file_manifest) == 0:
51 |         report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.py或.cpp文件: {txt}")
52 |         yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
53 |         return
54 |     yield from 生成函数注释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
55 | 


--------------------------------------------------------------------------------
/crazy_functions/询问多个大语言模型.py:
--------------------------------------------------------------------------------
 1 | from toolbox import CatchException, update_ui, get_conf
 2 | from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 3 | import datetime
 4 | @CatchException
 5 | def 同时问询(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
 6 |     """
 7 |     txt             输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
 8 |     llm_kwargs      gpt模型参数,如温度和top_p等,一般原样传递下去就行
 9 |     plugin_kwargs   插件模型的参数,用于灵活调整复杂功能的各种参数
10 |     chatbot         聊天显示框的句柄,用于显示给用户
11 |     history         聊天历史,前情提要
12 |     system_prompt   给gpt的静默提醒
13 |     user_request    当前用户的请求信息(IP地址等)
14 |     """
15 |     history = []    # 清空历史,以免输入溢出
16 |     MULTI_QUERY_LLM_MODELS = get_conf('MULTI_QUERY_LLM_MODELS')
17 |     chatbot.append((txt, "正在同时咨询" + MULTI_QUERY_LLM_MODELS))
18 |     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新
19 | 
20 |     # llm_kwargs['llm_model'] = 'chatglm&gpt-3.5-turbo&api2d-gpt-3.5-turbo' # 支持任意数量的llm接口,用&符号分隔
21 |     llm_kwargs['llm_model'] = MULTI_QUERY_LLM_MODELS # 支持任意数量的llm接口,用&符号分隔
22 |     gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
23 |         inputs=txt, inputs_show_user=txt,
24 |         llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
25 |         sys_prompt=system_prompt,
26 |         retry_times_at_unknown_error=0
27 |     )
28 | 
29 |     history.append(txt)
30 |     history.append(gpt_say)
31 |     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新
32 | 
33 | 
34 | @CatchException
35 | def 同时问询_指定模型(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
36 |     """
37 |     txt             输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
38 |     llm_kwargs      gpt模型参数,如温度和top_p等,一般原样传递下去就行
39 |     plugin_kwargs   插件模型的参数,用于灵活调整复杂功能的各种参数
40 |     chatbot         聊天显示框的句柄,用于显示给用户
41 |     history         聊天历史,前情提要
42 |     system_prompt   给gpt的静默提醒
43 |     user_request    当前用户的请求信息(IP地址等)
44 |     """
45 |     history = []    # 清空历史,以免输入溢出
46 | 
47 |     if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
48 |     # llm_kwargs['llm_model'] = 'chatglm&gpt-3.5-turbo&api2d-gpt-3.5-turbo' # 支持任意数量的llm接口,用&符号分隔
49 |     llm_kwargs['llm_model'] = plugin_kwargs.get("advanced_arg", 'chatglm&gpt-3.5-turbo') # 'chatglm&gpt-3.5-turbo' # 支持任意数量的llm接口,用&符号分隔
50 | 
51 |     chatbot.append((txt, f"正在同时咨询{llm_kwargs['llm_model']}"))
52 |     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新
53 | 
54 |     gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
55 |         inputs=txt, inputs_show_user=txt,
56 |         llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
57 |         sys_prompt=system_prompt,
58 |         retry_times_at_unknown_error=0
59 |     )
60 | 
61 |     history.append(txt)
62 |     history.append(gpt_say)
63 |     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新


--------------------------------------------------------------------------------
/crazy_functions/读文章写摘要.py:
--------------------------------------------------------------------------------
 1 | from toolbox import update_ui
 2 | from toolbox import CatchException, report_exception
 3 | from toolbox import write_history_to_file, promote_file_to_downloadzone
 4 | from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 5 | 
 6 | 
 7 | def 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
 8 |     import time, glob, os
 9 |     for index, fp in enumerate(file_manifest):
10 |         with open(fp, 'r', encoding='utf-8', errors='replace') as f:
11 |             file_content = f.read()
12 | 
13 |         prefix = "接下来请你逐文件分析下面的论文文件,概括其内容" if index==0 else ""
14 |         i_say = prefix + f'请对下面的文章片段用中文做一个概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{file_content}```'
15 |         i_say_show_user = prefix + f'[{index+1}/{len(file_manifest)}] 请对下面的文章片段做一个概述: {os.path.abspath(fp)}'
16 |         chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
17 |         yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
18 | 
19 |         msg = '正常'
20 |         gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say_show_user, llm_kwargs, chatbot, history=[], sys_prompt=system_prompt)   # with timeout countdown
21 |         chatbot[-1] = (i_say_show_user, gpt_say)
22 |         history.append(i_say_show_user); history.append(gpt_say)
23 |         yield from update_ui(chatbot=chatbot, history=history, msg=msg) # refresh the UI
24 |         time.sleep(2)
25 | 
26 |     all_file = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(file_manifest)])
27 |     i_say = f'根据以上你自己的分析,对全文进行概括,用学术性语言写一段中文摘要,然后再写一段英文摘要(包括{all_file})。'
28 |     chatbot.append((i_say, "[Local Message] waiting gpt response."))
29 |     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
30 | 
31 |     msg = '正常'
32 |     # ** gpt request **
33 |     gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say, llm_kwargs, chatbot, history=history, sys_prompt=system_prompt)   # 带超时倒计时
34 | 
35 |     chatbot[-1] = (i_say, gpt_say)
36 |     history.append(i_say); history.append(gpt_say)
37 |     yield from update_ui(chatbot=chatbot, history=history, msg=msg) # refresh the UI
38 |     res = write_history_to_file(history)
39 |     promote_file_to_downloadzone(res, chatbot=chatbot)
40 |     chatbot.append(("完成了吗?", res))
41 |     yield from update_ui(chatbot=chatbot, history=history, msg=msg) # refresh the UI
42 | 
43 | 
44 | 
45 | @CatchException
46 | def 读文章写摘要(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
47 |     history = []    # clear the history to prevent input overflow
48 |     import glob, os
49 |     if os.path.exists(txt):
50 |         project_folder = txt
51 |     else:
52 |         if txt == "": txt = '空空如也的输入栏'
53 |         report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
54 |         yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
55 |         return
56 |     file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] # + \
57 |                     # [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \
58 |                     # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)]
59 |     if len(file_manifest) == 0:
60 |         report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
61 |         yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
62 |         return
63 |     yield from 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
64 | 


--------------------------------------------------------------------------------
/crazy_functions/辅助功能.py:
--------------------------------------------------------------------------------
 1 | # encoding: utf-8
 2 | # @Time   : 2023/4/19
 3 | # @Author : Spike
 4 | # @Descr   :
 5 | from toolbox import update_ui, get_conf, get_user
 6 | from toolbox import CatchException
 7 | from toolbox import default_user_name
 8 | from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 9 | import shutil
10 | import os
11 | 
12 | 
13 | @CatchException
14 | def 猜你想问(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
15 |     if txt:
16 |         show_say = txt
17 |         prompt = txt+'\n回答完问题后,再列出用户可能提出的三个问题。'
18 |     else:
19 |         prompt = history[-1]+"\n分析上述回答,再列出用户可能提出的三个问题。"
20 |         show_say = '分析上述回答,再列出用户可能提出的三个问题。'
21 |     gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
22 |         inputs=prompt,
23 |         inputs_show_user=show_say,
24 |         llm_kwargs=llm_kwargs,
25 |         chatbot=chatbot,
26 |         history=history,
27 |         sys_prompt=system_prompt
28 |     )
29 |     chatbot[-1] = (show_say, gpt_say)
30 |     history.extend([show_say, gpt_say])
31 |     yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
32 | 
33 | 
34 | @CatchException
35 | def 清除缓存(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
36 |     chatbot.append(['清除本地缓存数据', '执行中. 删除数据'])
37 |     yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
38 | 
39 |     def _get_log_folder(user=default_user_name):
40 |         PATH_LOGGING = get_conf('PATH_LOGGING')
41 |         _dir = os.path.join(PATH_LOGGING, user)
42 |         if not os.path.exists(_dir): os.makedirs(_dir)
43 |         return _dir
44 | 
45 |     def _get_upload_folder(user=default_user_name):
46 |         PATH_PRIVATE_UPLOAD = get_conf('PATH_PRIVATE_UPLOAD')
47 |         _dir = os.path.join(PATH_PRIVATE_UPLOAD, user)
48 |         return _dir
49 | 
50 |     shutil.rmtree(_get_log_folder(get_user(chatbot)), ignore_errors=True)
51 |     shutil.rmtree(_get_upload_folder(get_user(chatbot)), ignore_errors=True)
52 | 
53 |     chatbot.append(['清除本地缓存数据', '执行完成'])
54 |     yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI


--------------------------------------------------------------------------------
/docs/Dockerfile+ChatGLM:
--------------------------------------------------------------------------------
1 | # This Dockerfile is no longer maintained; please use docs/GithubAction+ChatGLM+Moss instead
2 | 


--------------------------------------------------------------------------------
/docs/Dockerfile+NoLocal+Latex:
--------------------------------------------------------------------------------
1 | # This Dockerfile is no longer maintained; please use docs/GithubAction+NoLocal+Latex instead
2 | 


--------------------------------------------------------------------------------
/docs/GithubAction+AllCapacity:
--------------------------------------------------------------------------------
 1 | # docker build -t gpt-academic-all-capacity -f docs/GithubAction+AllCapacity  --network=host --build-arg http_proxy=http://localhost:10881 --build-arg https_proxy=http://localhost:10881 .
 2 | 
 3 | # Build from an NVIDIA base image to enable GPU support (the CUDA version reported by the host's nvidia-smi must be >= 11.3)
 4 | FROM fuqingxu/11.3.1-runtime-ubuntu20.04-with-texlive:latest
 5 | 
 6 | # Dependencies required by edge-tts and by some pip packages
 7 | RUN apt update && apt install ffmpeg build-essential -y
 8 | RUN apt-get install -y fontconfig
 9 | RUN ln -s /usr/local/texlive/2023/texmf-dist/fonts/truetype /usr/share/fonts/truetype/texlive
10 | RUN fc-cache -fv
11 | RUN apt-get clean
12 | 
13 | # use python3 as the system default python
14 | WORKDIR /gpt
15 | RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.8
16 | # Install PyTorch
17 | RUN python3 -m pip install torch --extra-index-url https://download.pytorch.org/whl/cu113
18 | # Install pip dependencies
19 | RUN python3 -m pip install openai numpy arxiv rich
20 | RUN python3 -m pip install colorama Markdown pygments pymupdf
21 | RUN python3 -m pip install python-docx moviepy pdfminer
22 | RUN python3 -m pip install zh_langchain==0.2.1 pypinyin
23 | RUN python3 -m pip install rarfile py7zr
24 | RUN python3 -m pip install aliyun-python-sdk-core==2.13.3 pyOpenSSL webrtcvad scipy git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git
25 | # Clone the repository
26 | WORKDIR /gpt
27 | RUN git clone --depth=1 https://github.com/binary-husky/gpt_academic.git
28 | WORKDIR /gpt/gpt_academic
29 | RUN git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llms/moss
30 | 
31 | RUN python3 -m pip install -r requirements.txt
32 | RUN python3 -m pip install -r request_llms/requirements_moss.txt
33 | RUN python3 -m pip install -r request_llms/requirements_qwen.txt
34 | RUN python3 -m pip install -r request_llms/requirements_chatglm.txt
35 | RUN python3 -m pip install -r request_llms/requirements_newbing.txt
36 | RUN python3 -m pip install nougat-ocr
37 | RUN python3 -m pip cache purge
38 | 
39 | # Warm up the Tiktoken module
40 | RUN python3  -c 'from check_proxy import warm_up_modules; warm_up_modules()'
41 | 
42 | # Launch
43 | CMD ["python3", "-u", "main.py"]
44 | 


--------------------------------------------------------------------------------
/docs/GithubAction+ChatGLM+Moss:
--------------------------------------------------------------------------------
 1 | 
 2 | # Build from an NVIDIA base image to enable GPU support (the CUDA version reported by the host's nvidia-smi must be >= 11.3)
 3 | FROM nvidia/cuda:11.3.1-runtime-ubuntu20.04
 4 | RUN apt-get update
 5 | RUN apt-get install -y curl proxychains gcc
 6 | RUN apt-get install -y git python python3 python-dev python3-dev --fix-missing
 7 | 
 8 | # Dependencies required by edge-tts and by some pip packages
 9 | RUN apt update && apt install ffmpeg build-essential -y
10 | RUN apt-get clean
11 | 
12 | # use python3 as the system default python
13 | RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.8
14 | # Install PyTorch
15 | RUN python3 -m pip install torch --extra-index-url https://download.pytorch.org/whl/cu113
16 | # Clone the repository
17 | WORKDIR /gpt
18 | RUN git clone --depth=1 https://github.com/binary-husky/gpt_academic.git
19 | WORKDIR /gpt/gpt_academic
20 | RUN git clone https://github.com/OpenLMLab/MOSS.git request_llms/moss
21 | RUN python3 -m pip install -r requirements.txt
22 | RUN python3 -m pip install -r request_llms/requirements_moss.txt
23 | RUN python3 -m pip install -r request_llms/requirements_qwen.txt
24 | RUN python3 -m pip install -r request_llms/requirements_chatglm.txt
25 | RUN python3 -m pip install -r request_llms/requirements_newbing.txt
26 | RUN python3 -m pip cache purge
27 | 
28 | 
29 | # Warm up the Tiktoken module
30 | RUN python3  -c 'from check_proxy import warm_up_modules; warm_up_modules()'
31 | 
32 | # Launch
33 | CMD ["python3", "-u", "main.py"]
34 | 


--------------------------------------------------------------------------------
/docs/GithubAction+JittorLLMs:
--------------------------------------------------------------------------------
 1 | # Build from an NVIDIA base image to enable GPU support (the CUDA version reported by the host's nvidia-smi must be >= 11.3)
 2 | FROM nvidia/cuda:11.3.1-runtime-ubuntu20.04
 3 | ARG useProxyNetwork=''
 4 | RUN apt-get update
 5 | RUN apt-get install -y curl proxychains g++
 6 | RUN apt-get install -y git python python3 python-dev python3-dev --fix-missing
 7 | 
 8 | # use python3 as the system default python
 9 | RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.8
10 | 
11 | # Install PyTorch
12 | RUN python3 -m pip install torch --extra-index-url https://download.pytorch.org/whl/cu113
13 | 
14 | # Clone the repository
15 | WORKDIR /gpt
16 | RUN git clone --depth=1 https://github.com/binary-husky/gpt_academic.git
17 | WORKDIR /gpt/gpt_academic
18 | RUN python3 -m pip install -r requirements.txt
19 | RUN python3 -m pip install -r request_llms/requirements_chatglm.txt
20 | RUN python3 -m pip install -r request_llms/requirements_newbing.txt
21 | RUN python3 -m pip install -r request_llms/requirements_jittorllms.txt -i https://pypi.jittor.org/simple -I
22 | 
23 | # Download JittorLLMs
24 | RUN git clone https://github.com/binary-husky/JittorLLMs.git --depth 1 request_llms/jittorllms
25 | 
26 | # Dependency required by edge-tts
27 | RUN apt update && apt install ffmpeg -y
28 | 
29 | # Bust the build cache to make sure the code is up to date
30 | ADD "https://www.random.org/cgi-bin/randbyte?nbytes=10&format=h" skipcache
31 | RUN git pull
32 | 
33 | # Warm up the Tiktoken module
34 | RUN python3  -c 'from check_proxy import warm_up_modules; warm_up_modules()'
35 | 
36 | # Launch
37 | CMD ["python3", "-u", "main.py"]
38 | 


--------------------------------------------------------------------------------
/docs/GithubAction+NoLocal:
--------------------------------------------------------------------------------
 1 | # This Dockerfile builds a "no local model" environment; to use local models such as chatglm, see docs/GithubAction+ChatGLM+Moss
 2 | # How to build: edit `config.py` first, then run: docker build -t gpt-academic-nolocal -f docs/GithubAction+NoLocal .
 3 | # How to run: docker run --rm -it --net=host gpt-academic-nolocal
 4 | FROM python:3.11
 5 | 
 6 | # Set the working directory
 7 | WORKDIR /gpt
 8 | 
 9 | # Copy the project files
10 | COPY . .
11 | 
12 | # Install dependencies
13 | RUN pip3 install -r requirements.txt
14 | 
15 | # Dependency required by edge-tts
16 | RUN apt update && apt install ffmpeg -y
17 | 
18 | # Optional step: warm up modules
19 | RUN python3  -c 'from check_proxy import warm_up_modules; warm_up_modules()'
20 | 
21 | RUN python3 -m pip cache purge && apt-get clean
22 | 
23 | # Launch
24 | CMD ["python3", "-u", "main.py"]
25 | 


--------------------------------------------------------------------------------
/docs/GithubAction+NoLocal+AudioAssistant:
--------------------------------------------------------------------------------
 1 | # This Dockerfile builds a "no local model" environment; to use local models such as chatglm, see docs/GithubAction+ChatGLM+Moss
 2 | # How to build: edit `config.py` first, then run: docker build -t gpt-academic-nolocal -f docs/GithubAction+NoLocal+AudioAssistant .
 3 | # How to run: docker run --rm -it --net=host gpt-academic-nolocal
 4 | FROM python:3.11
 5 | 
 6 | # Set the working directory
 7 | WORKDIR /gpt
 8 | 
 9 | # Copy the project files
10 | COPY . .
11 | 
12 | # Install dependencies
13 | RUN pip3 install -r requirements.txt
14 | 
15 | # Install extra dependencies for the audio plugin
16 | RUN pip3 install aliyun-python-sdk-core==2.13.3 pyOpenSSL webrtcvad scipy git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git
17 | 
18 | # Dependency required by edge-tts
19 | RUN apt update && apt install ffmpeg -y
20 | 
21 | # Optional step: warm up modules
22 | RUN python3  -c 'from check_proxy import warm_up_modules; warm_up_modules()'
23 | 
24 | # Launch
25 | CMD ["python3", "-u", "main.py"]
26 | 


--------------------------------------------------------------------------------
/docs/GithubAction+NoLocal+Latex:
--------------------------------------------------------------------------------
 1 | # This Dockerfile builds a "no local model" environment; to use local models such as chatglm, see docs/GithubAction+ChatGLM+Moss
 2 | # - 1 Edit `config.py`
 3 | # - 2 Build: docker build -t gpt-academic-nolocal-latex -f docs/GithubAction+NoLocal+Latex .
 4 | # - 3 Run: docker run -v /home/fuqingxu/arxiv_cache:/root/arxiv_cache --rm -it --net=host gpt-academic-nolocal-latex
 5 | 
 6 | FROM menghuan1918/ubuntu_uv_ctex:latest
 7 | ENV DEBIAN_FRONTEND=noninteractive
 8 | SHELL ["/bin/bash", "-c"]
 9 | WORKDIR /gpt
10 | 
11 | # Copy the dependency manifest first
12 | COPY requirements.txt .
13 | 
14 | # Install dependencies
15 | RUN pip install --break-system-packages openai numpy arxiv rich colorama Markdown pygments pymupdf python-docx pdfminer \
16 |     && pip install --break-system-packages -r requirements.txt \
17 |     && if [ "$(uname -m)" = "x86_64" ]; then \
18 |     pip install --break-system-packages nougat-ocr; \
19 |     fi \
20 |     && pip cache purge \
21 |     && rm -rf /root/.cache/pip/*
22 | 
23 | # Create a non-root user
24 | RUN useradd -m gptuser && chown -R gptuser /gpt
25 | USER gptuser
26 | 
27 | # Copy the source files last, so that code updates only rebuild the last few layers, greatly reducing the size of docker pull
28 | COPY --chown=gptuser:gptuser . .
29 | 
30 | # Optional step: warm up modules
31 | RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'
32 | 
33 | RUN python3 -m pip cache purge
34 | 
35 | # Launch
36 | CMD ["python3", "-u", "main.py"]
37 | 


--------------------------------------------------------------------------------
/docs/GithubAction+NoLocal+Vectordb:
--------------------------------------------------------------------------------
 1 | # This Dockerfile builds a "no local model" environment; to use local models such as chatglm, see docs/GithubAction+ChatGLM+Moss
 2 | # How to build: edit `config.py` first, then run: docker build -t gpt-academic-nolocal-vs -f docs/GithubAction+NoLocal+Vectordb .
 3 | # How to run: docker run --rm -it --net=host gpt-academic-nolocal-vs
 4 | FROM python:3.11
 5 | 
 6 | # Set the working directory
 7 | WORKDIR /gpt
 8 | 
 9 | # Copy the project files
10 | COPY . .
11 | 
12 | # Install dependencies
13 | RUN pip3 install -r requirements.txt
14 | 
15 | # Install extra dependencies for the knowledge-base plugin
16 | RUN apt-get update && apt-get install libgl1 -y
17 | RUN pip3 install torch torchvision --index-url https://download.pytorch.org/whl/cpu
18 | RUN pip3 install transformers protobuf langchain sentence-transformers  faiss-cpu nltk beautifulsoup4 bitsandbytes tabulate icetk --upgrade
19 | RUN pip3 install unstructured[all-docs] --upgrade
20 | RUN python3  -c 'from check_proxy import warm_up_vectordb; warm_up_vectordb()'
21 | 
22 | # Dependency required by edge-tts
23 | RUN apt update && apt install ffmpeg -y
24 | 
25 | # Optional step: warm up modules
26 | RUN python3  -c 'from check_proxy import warm_up_modules; warm_up_modules()'
27 | RUN python3 -m pip cache purge && apt-get clean
28 | 
29 | 
30 | # Launch
31 | CMD ["python3", "-u", "main.py"]
32 | 


--------------------------------------------------------------------------------
/docs/WindowsRun.bat:
--------------------------------------------------------------------------------
 1 | @echo off
 2 | setlocal
 3 | 
 4 | :: Set environment variables
 5 | set ENV_NAME=gpt
 6 | set ENV_PATH=%~dp0%ENV_NAME%
 7 | set SCRIPT_PATH=%~dp0main.py
 8 | 
 9 | :: Check whether the environment has already been extracted
10 | if not exist "%ENV_PATH%" (
11 |     echo Extracting environment...
12 |     mkdir "%ENV_PATH%"
13 |     tar -xzf gpt.tar.gz -C "%ENV_PATH%"
14 |     
15 |     :: Run the conda environment activation script
16 |     call "%ENV_PATH%\Scripts\activate.bat"
17 | ) else (
18 |     :: If the environment already exists, activate it directly
19 |     call "%ENV_PATH%\Scripts\activate.bat"
20 | )
21 | echo Start to run program:
22 | :: Run the Python script
23 | python "%SCRIPT_PATH%"
24 | 
25 | endlocal
26 | pause


--------------------------------------------------------------------------------
/docs/WithFastapi.md:
--------------------------------------------------------------------------------
 1 | # Running with FastAPI
 2 | 
 3 | We currently support FastAPI in order to solve the sub-path deployment issue.
 4 | 
 5 | 1. Change the CUSTOM_PATH setting in `config.py`
 6 | 
 7 | ```sh
 8 | nano config.py
 9 | ```
10 | 
11 | 2. Edit main.py
12 | 
13 | ```diff
14 |     auto_opentab_delay()
15 |     - demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION, favicon_path="docs/logo.png")
16 |     + demo.queue(concurrency_count=CONCURRENT_COUNT)
17 | 
18 |     - # To run under a sub-path
19 |     - # CUSTOM_PATH = get_conf('CUSTOM_PATH')
20 |     - # if CUSTOM_PATH != "/":
21 |     - #     from toolbox import run_gradio_in_subpath
22 |     - #     run_gradio_in_subpath(demo, auth=AUTHENTICATION, port=PORT, custom_path=CUSTOM_PATH)
23 |     - # else:
24 |     - #     demo.launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION, favicon_path="docs/logo.png")
25 | 
26 |     + # To run under a sub-path
27 |     + CUSTOM_PATH = get_conf('CUSTOM_PATH')
28 |     + if CUSTOM_PATH != "/":
29 |     +     from toolbox import run_gradio_in_subpath
30 |     +     run_gradio_in_subpath(demo, auth=AUTHENTICATION, port=PORT, custom_path=CUSTOM_PATH)
31 |     + else:
32 |     +     demo.launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION, favicon_path="docs/logo.png")
33 | 
34 | if __name__ == "__main__":
35 |     main()
36 | ```
37 | 
38 | 3. Go!
39 | 
40 | ```sh
41 | python main.py
42 | ```
43 | 


--------------------------------------------------------------------------------
/docs/demo.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/binary-husky/gpt_academic/a7a56b5058fc8e69641e113f615aed8ab3a59a64/docs/demo.jpg


--------------------------------------------------------------------------------
/docs/demo2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/binary-husky/gpt_academic/a7a56b5058fc8e69641e113f615aed8ab3a59a64/docs/demo2.jpg


--------------------------------------------------------------------------------
/docs/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/binary-husky/gpt_academic/a7a56b5058fc8e69641e113f615aed8ab3a59a64/docs/logo.png


--------------------------------------------------------------------------------
/docs/use_audio.md:
--------------------------------------------------------------------------------
 1 | # Using the Audio Interaction Feature
 2 | 
 3 | 
 4 | ## 1. Install extra dependencies
 5 | ```
 6 | pip install --upgrade pyOpenSSL webrtcvad scipy git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git
 7 | ```
 8 | 
 9 | If the command above cannot be executed due to network restrictions:
10 | 1. git clone the alibabacloud-nls-python-sdk project (or download the zip archive directly from its GitHub page).
11 | On the command line, run: `git clone https://github.com/aliyun/alibabacloud-nls-python-sdk.git`
12 | 2. Enter the alibabacloud-nls-python-sdk directory and run: `python setup.py install`
13 | 
14 | 
15 | ## 2. Configure the audio feature switch and the Aliyun APPKEY (config.py / config_private.py / environment variables)
16 | 
17 | - Register an Aliyun account
18 | - Enable the Intelligent Speech Interaction service (it comes with a free usage quota)
19 | - Obtain a token and an appkey
20 | - Aliyun will gradually be replaced by other, cheaper cloud services in the future
21 | 
22 | ```
23 | ENABLE_AUDIO = True
24 | ALIYUN_TOKEN = "554a50fcd0bb476c8d07bb630e94d20c"    # this token has expired
25 | ALIYUN_APPKEY = "RoPlZrM88DnAFkZK"   # this appkey has expired
26 | ```
27 | 
28 | See https://help.aliyun.com/document_detail/450255.html
29 | First create an Aliyun developer account. After logging in, enable the Intelligent Speech Interaction service; you can obtain a token for free, then create a project under "All Projects" to obtain an appkey.
30 | 
31 | - Advanced feature
32 | Additionally fill in ALIYUN_ACCESSKEY and ALIYUN_SECRET to fetch ALIYUN_TOKEN automatically
33 | ```
34 | ALIYUN_APPKEY = "RoP1ZrM84DnAFkZK"
35 | ALIYUN_TOKEN = ""
36 | ALIYUN_ACCESSKEY = "LTAI5q6BrFUzoRXVGUWnekh1"
37 | ALIYUN_SECRET = "eHmI20AVWIaQZ0CiTD2bGQVsaP9i68"
38 | ```
39 | 
40 | 
41 | ## 3. Launch
42 | 
43 | Start gpt-academic: `python main.py`
44 | 
45 | ## 4. Click "record from microphone" and grant audio-capture permission
46 | 
47 | I. If you want to capture your own voice (not the computer's audio), simply select the corresponding microphone in the browser.
48 | 
49 | II. If you want to capture the computer's audio (not your own voice), install `VB-Audio VoiceMeeter` and open the sound control panel:
50 | - 1 `[Route all of the computer's playback audio into VoiceMeeter]` In the playback tab, set the VoiceMeeter Input virtual device as the default playback device.
51 | - 2 `[Release the captured audio to gpt-academic]` Open the gpt-academic main page and grant audio-capture permission; a microphone icon will appear in or near the browser's address bar. Open it, follow the browser's prompt to select the VoiceMeeter virtual microphone, then refresh the page and grant audio-capture permission again.
52 | - 3 `[Also release the captured audio to your headphones or speakers]` After step 1 you will no longer hear the computer's audio. To capture audio without disrupting normal use, complete this final step. In the recording tab of the sound control panel, set the VoiceMeeter Output virtual device as the default, then double-click it to open its settings.
53 |   - 3-1 Open the VoiceMeeter Output device's submenu and switch to the Listen tab.
54 |   - 3-2 Check "Listen to this device".
55 |   - 3-3 In the "playback through this device" drop-down, select your usual headphones or speakers.
56 | 
57 | III. `[Route the playback audio of specific software (e.g. Tencent Meeting) into VoiceMeeter]` On top of step II, open the audio menu of the software in question (e.g. Tencent Meeting), set its speaker to VoiceMeeter Input, and set its microphone to your usual headset microphone.
58 | 
59 | IV. A page refresh is required when switching between the two audio-capture modes.
60 | 
61 | V. Known pitfall: recording cannot be enabled when running neither on localhost nor over https: https://blog.csdn.net/weixin_39461487/article/details/109594434
62 | 
63 | ## 5. Click "实时音频采集" (real-time audio capture) in the function plugin area, or another audio interaction feature
64 | 


--------------------------------------------------------------------------------
/docs/use_tts.md:
--------------------------------------------------------------------------------
 1 | # Using TTS (Text-to-Speech)
 2 | 
 3 | 
 4 | ## 1. Using EDGE-TTS (simple)
 5 | 
 6 | Set the following configuration options in this project:
 7 | 
 8 | ```
 9 | TTS_TYPE = "EDGE_TTS"
10 | EDGE_TTS_VOICE = "zh-CN-XiaoxiaoNeural"
11 | ```
12 | 
13 | ## 2. Using SoVITS (requires a GPU)
14 | 
15 | Use the docker-compose.yml file below to start the SoVITS API service first.
16 | 
17 |   1. Create the following folder structure
18 |       ```shell
19 |       .
20 |       ├── docker-compose.yml
21 |       └── reference
22 |           ├── clone_target_txt.txt
23 |           └── clone_target_wave.mp3
24 |       ```
25 |   2. where `docker-compose.yml` is
26 |       ```yaml
27 |       version: '3.8'
28 |       services:
29 |         gpt-sovits:
30 |           image: fuqingxu/sovits_gptac_trim:latest
31 |           container_name: sovits_gptac_container
32 |           working_dir: /workspace/gpt_sovits_demo
33 |           environment:
34 |             - is_half=False
35 |             - is_share=False
36 |           volumes:
37 |             - ./reference:/reference
38 |           ports:
39 |             - "19880:9880"  # 19880 is the exposed port of the sovits api -- remember it
40 |           shm_size: 16G
41 |           deploy:
42 |             resources:
43 |               reservations:
44 |                 devices:
45 |                 - driver: nvidia
46 |                   count: "all"
47 |                   capabilities: [gpu]
48 |           command: bash -c "python3 api.py"
49 |       ```
50 |   3. where `clone_target_wave.mp3` is an audio clip of the voice to be cloned and `clone_target_txt.txt` is the transcript of that clip ( https://wiki.biligame.com/ys/%E8%A7%92%E8%89%B2%E8%AF%AD%E9%9F%B3 )
51 |   4. Run `docker-compose up`
52 |   5. Set the following configuration options in this project
53 |       (19880 is the exposed port of the sovits api, matching the port in docker-compose.yml)
54 |       ```
55 |       TTS_TYPE = "LOCAL_SOVITS_API"
56 |       GPT_SOVITS_URL = "http://127.0.0.1:19880"
57 |       ```
58 |   6. Launch this project
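
Before wiring the service into this project, it can help to confirm that the container is actually listening. Below is a minimal reachability sketch in Python, assuming only the compose file above with port 19880 mapped; the exact HTTP endpoints and parameters depend on the `api.py` version shipped in the image.

```python
# Hypothetical smoke test: checks only that something accepts TCP connections
# on the SoVITS API port; it does not exercise the TTS endpoints themselves.
import socket

def sovits_is_up(host: str = "127.0.0.1", port: int = 19880, timeout: float = 3.0) -> bool:
    try:
        with socket.create_connection((host, port), timeout=timeout):
            return True
    except OSError:
        return False

if __name__ == "__main__":
    print("SoVITS API reachable:", sovits_is_up())
```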


--------------------------------------------------------------------------------
/docs/use_vllm.md:
--------------------------------------------------------------------------------
 1 | # Using vLLM
 2 | 
 3 | 
 4 | ## 1. First, start vLLM with a model of your choice
 5 | 
 6 | ```
 7 | python -m vllm.entrypoints.openai.api_server --model /home/hmp/llm/cache/Qwen1___5-32B-Chat --tensor-parallel-size 2 --dtype=half
 8 | ```
 9 | 
10 | This uses a local model stored at `/home/hmp/llm/cache/Qwen1___5-32B-Chat`; change it to suit your needs.
11 | 
12 | ## 2. Test vLLM
13 | 
14 | ```
15 | curl http://localhost:8000/v1/chat/completions \
16 | -H "Content-Type: application/json" \
17 | -d '{
18 |   "model": "/home/hmp/llm/cache/Qwen1___5-32B-Chat",
19 |   "messages": [
20 |   {"role": "system", "content": "You are a helpful assistant."},
21 |   {"role": "user", "content": "怎么实现一个去中心化的控制器?"}
22 |   ]
23 | }'
24 | ```
25 | 
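The same test can also be driven from Python; here is a minimal sketch using the `openai` client pointed at the vLLM server (this assumes the server from step 1 is listening on localhost:8000; the api_key is a placeholder, since vLLM does not validate it by default):

```python
# Hypothetical Python equivalent of the curl test above.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="sk-placeholder")
resp = client.chat.completions.create(
    model="/home/hmp/llm/cache/Qwen1___5-32B-Chat",  # must match the --model path
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "怎么实现一个去中心化的控制器?"},
    ],
)
print(resp.choices[0].message.content)
```
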
26 | ## 3. Configure this project
27 | 
28 | ```
29 | API_KEY = "sk-123456789xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx123456789"
30 | LLM_MODEL = "vllm-/home/hmp/llm/cache/Qwen1___5-32B-Chat(max_token=4096)"
31 | API_URL_REDIRECT = {"https://api.openai.com/v1/chat/completions": "http://localhost:8000/v1/chat/completions"}
32 | ```
33 | 
34 | ```
35 | "vllm-/home/hmp/llm/cache/Qwen1___5-32B-Chat(max_token=4096)"
36 | where
37 |   "vllm-"                                     is the prefix (required)
38 |   "/home/hmp/llm/cache/Qwen1___5-32B-Chat"    is the model name (required)
39 |   "(max_token=4096)"                          is extra configuration (optional)
40 | ```
41 | 
42 | ## 4. Launch!
43 | 
44 | ```
45 | python main.py
46 | ```
47 | 


--------------------------------------------------------------------------------
/request_llms/README.md:
--------------------------------------------------------------------------------
 1 | P.S. If you successfully integrate a new large language model by following the steps below, feel free to open a Pull Request (and if you run into trouble during the integration, you are welcome to join the QQ group at the bottom of the README and contact the group owner)
 2 | 
 3 | 
 4 | # How to integrate another local large language model
 5 | 
 6 | 1. Copy `request_llms/bridge_llama2.py` and rename it to whatever you like
 7 | 
 8 | 2. Modify the `load_model_and_tokenizer` method to load your model and tokenizer (find a demo on the model's official page and copy-paste it)
 9 | 
10 | 3. Modify the `llm_stream_generator` method to define model inference (again, find a demo on the model's official page and copy-paste it); both methods follow the shape sketched after this list
11 | 
12 | 4. Test from the command line
13 |     - Modify `tests/test_llms.py` (one glance at that file will tell you what to change)
14 |     - Run `python tests/test_llms.py`
15 | 
16 | 5. Once the test passes, make the final changes in `request_llms/bridge_all.py` to hook your model fully into the framework (one glance at that file will tell you what to change)
17 | 
18 | 6. Modify the `LLM_MODEL` configuration, then run `python main.py` to test the end result
19 | 
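To make steps 2 and 3 concrete, here is a minimal sketch of the two methods, modeled on the existing `LocalLLMHandle` bridges in this directory (`your-org/your-model` and `chat_stream` are placeholders; replace them with the demo code from your model's official page):

```python
# Sketch only -- not a drop-in bridge. It shows the expected shape of the two
# methods; load_model_info and try_to_import_special_deps are omitted here
# (see the existing bridges for those).
from transformers import AutoModelForCausalLM, AutoTokenizer
from request_llms.local_llm_class import LocalLLMHandle

class GetYourModelHandle(LocalLLMHandle):

    def load_model_and_tokenizer(self):
        # runs in the child process
        model_id = "your-org/your-model"  # placeholder
        self._tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
        self._model = AutoModelForCausalLM.from_pretrained(
            model_id, device_map="auto", trust_remote_code=True).eval()
        return self._model, self._tokenizer

    def llm_stream_generator(self, **kwargs):
        # runs in the child process; yield partial responses as they arrive
        query, history = kwargs['query'], kwargs['history']
        for response in self._model.chat_stream(self._tokenizer, query, history=history):  # model-specific
            yield response
```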
20 | 
21 | # How to integrate another online large language model
22 | 
23 | 1. Copy `request_llms/bridge_zhipu.py` and rename it to whatever you like
24 | 
25 | 2. Modify `predict_no_ui_long_connection`
26 | 
27 | 3. Modify `predict`; both functions follow the shape sketched after this list
28 | 
29 | 4. Test from the command line
30 |     - Modify `tests/test_llms.py` (one glance at that file will tell you what to change)
31 |     - Run `python tests/test_llms.py`
32 | 
33 | 5. Once the test passes, make the final changes in `request_llms/bridge_all.py` to hook your model fully into the framework (one glance at that file will tell you what to change)
34 | 
35 | 6. Modify the `LLM_MODEL` configuration, then run `python main.py` to test the end result
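
For steps 2 and 3, the two entry points follow a fixed contract; here is a minimal sketch distilled from the existing online bridges in this directory (`YourApiInstance` is a placeholder for your own request wrapper):

```python
# Sketch only -- shows the contract each online bridge must fulfil.
from toolbox import update_ui

def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
                                  observe_window=[], console_silence=False):
    response = ""
    for response in YourApiInstance().generate(inputs, llm_kwargs, history, sys_prompt):
        if len(observe_window) >= 1:
            observe_window[0] = response  # report progress to the caller's watchdog
    return response

def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='',
            stream=True, additional_fn=None):
    chatbot.append((inputs, ""))
    response = ""
    for response in YourApiInstance().generate(inputs, llm_kwargs, history, system_prompt):
        chatbot[-1] = (inputs, response)
        yield from update_ui(chatbot=chatbot, history=history)
    history.extend([inputs, response])
```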
36 | 


--------------------------------------------------------------------------------
/request_llms/bridge_chatglm4.py:
--------------------------------------------------------------------------------
 1 | model_name = "ChatGLM4"
 2 | cmd_to_install = """
 3 | `pip install -r request_llms/requirements_chatglm4.txt`
 4 | `pip install modelscope`
 5 | `modelscope download --model ZhipuAI/glm-4-9b-chat --local_dir ./THUDM/glm-4-9b-chat`
 6 | """
 7 | 
 8 | 
 9 | from toolbox import get_conf, ProxyNetworkActivate
10 | from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
11 | 
12 | 
13 | # ------------------------------------------------------------------------------------------------------------------------
14 | # 🔌💻 Local Model
15 | # ------------------------------------------------------------------------------------------------------------------------
16 | class GetGLM4Handle(LocalLLMHandle):
17 | 
18 |     def load_model_info(self):
19 |         # 🏃‍♂️🏃‍♂️🏃‍♂️ runs in the child process
20 |         self.model_name = model_name
21 |         self.cmd_to_install = cmd_to_install
22 | 
23 |     def load_model_and_tokenizer(self):
24 |         # 🏃‍♂️🏃‍♂️🏃‍♂️ runs in the child process
25 |         import torch
26 |         from transformers import AutoModel, AutoModelForCausalLM, AutoTokenizer
27 |         import os
28 | 
29 |         LOCAL_MODEL_PATH, device = get_conf("CHATGLM_LOCAL_MODEL_PATH", "LOCAL_MODEL_DEVICE")
30 |         model_path = LOCAL_MODEL_PATH
31 |         chatglm_tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
32 |         chatglm_model = AutoModelForCausalLM.from_pretrained(
33 |             model_path,
34 |             torch_dtype=torch.bfloat16,
35 |             low_cpu_mem_usage=True,
36 |             trust_remote_code=True,
37 |             device=device
38 |         ).eval().to(device)
39 |         self._model = chatglm_model
40 |         self._tokenizer = chatglm_tokenizer
41 |         return self._model, self._tokenizer
42 | 
43 | 
44 |     def llm_stream_generator(self, **kwargs):
45 |         # 🏃‍♂️🏃‍♂️🏃‍♂️ runs in the child process
46 |         def adaptor(kwargs):
47 |             query = kwargs["query"]
48 |             max_length = kwargs["max_length"]
49 |             top_p = kwargs["top_p"]
50 |             temperature = kwargs["temperature"]
51 |             history = kwargs["history"]
52 |             return query, max_length, top_p, temperature, history
53 | 
54 |         query, max_length, top_p, temperature, history = adaptor(kwargs)
55 |         inputs = self._tokenizer.apply_chat_template([{"role": "user", "content": query}],
56 |                                        add_generation_prompt=True,
57 |                                        tokenize=True,
58 |                                        return_tensors="pt",
59 |                                        return_dict=True
60 |                                        ).to(self._model.device)
61 |         gen_kwargs = {"max_length": max_length, "do_sample": True, "top_p": top_p}
62 | 
63 |         outputs = self._model.generate(**inputs, **gen_kwargs)
64 |         outputs = outputs[:, inputs['input_ids'].shape[1]:]
65 |         response = self._tokenizer.decode(outputs[0], skip_special_tokens=True)
66 |         yield response
67 | 
68 |     def try_to_import_special_deps(self, **kwargs):
69 |         # import something that will raise error if the user does not install requirement_*.txt
70 |         # 🏃‍♂️🏃‍♂️🏃‍♂️ runs in the main process
71 |         import importlib
72 | 
73 |         # importlib.import_module('modelscope')
74 | 
75 | 
76 | # ------------------------------------------------------------------------------------------------------------------------
77 | # 🔌💻 GPT-Academic Interface
78 | # ------------------------------------------------------------------------------------------------------------------------
79 | predict_no_ui_long_connection, predict = get_local_llm_predict_fns(
80 |     GetGLM4Handle, model_name, history_format="chatglm3"
81 | )
82 | 


--------------------------------------------------------------------------------
/request_llms/bridge_chatglmonnx.py:
--------------------------------------------------------------------------------
 1 | model_name = "ChatGLM-ONNX"
 2 | cmd_to_install = "`pip install -r request_llms/requirements_chatglm_onnx.txt`"
 3 | 
 4 | 
 5 | from transformers import AutoModel, AutoTokenizer
 6 | import time
 7 | import threading
 8 | import importlib
 9 | from toolbox import update_ui, get_conf
10 | from multiprocessing import Process, Pipe
11 | from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
12 | 
13 | from .chatglmoonx import ChatGLMModel, chat_template
14 | 
15 | 
16 | 
17 | # ------------------------------------------------------------------------------------------------------------------------
18 | # 🔌💻 Local Model
19 | # ------------------------------------------------------------------------------------------------------------------------
20 | class GetONNXGLMHandle(LocalLLMHandle):
21 | 
22 |     def load_model_info(self):
23 |         # 🏃‍♂️🏃‍♂️🏃‍♂️ runs in the child process
24 |         self.model_name = model_name
25 |         self.cmd_to_install = cmd_to_install
26 | 
27 |     def load_model_and_tokenizer(self):
28 |         # 🏃‍♂️🏃‍♂️🏃‍♂️ runs in the child process
29 |         import os, glob
30 |         if not len(glob.glob("./request_llms/ChatGLM-6b-onnx-u8s8/chatglm-6b-int8-onnx-merged/*.bin")) >= 7: # this model ships as seven .bin files
31 |             from huggingface_hub import snapshot_download
32 |             snapshot_download(repo_id="K024/ChatGLM-6b-onnx-u8s8", local_dir="./request_llms/ChatGLM-6b-onnx-u8s8")
33 |         def create_model():
34 |             return ChatGLMModel(
35 |                 tokenizer_path = "./request_llms/ChatGLM-6b-onnx-u8s8/chatglm-6b-int8-onnx-merged/sentencepiece.model",
36 |                 onnx_model_path = "./request_llms/ChatGLM-6b-onnx-u8s8/chatglm-6b-int8-onnx-merged/chatglm-6b-int8.onnx"
37 |             )
38 |         self._model = create_model()
39 |         return self._model, None
40 | 
41 |     def llm_stream_generator(self, **kwargs):
42 |         # 🏃‍♂️🏃‍♂️🏃‍♂️ runs in the child process
43 |         def adaptor(kwargs):
44 |             query = kwargs['query']
45 |             max_length = kwargs['max_length']
46 |             top_p = kwargs['top_p']
47 |             temperature = kwargs['temperature']
48 |             history = kwargs['history']
49 |             return query, max_length, top_p, temperature, history
50 | 
51 |         query, max_length, top_p, temperature, history = adaptor(kwargs)
52 | 
53 |         prompt = chat_template(history, query)
54 |         for answer in self._model.generate_iterate(
55 |             prompt,
56 |             max_generated_tokens=max_length,
57 |             top_k=1,
58 |             top_p=top_p,
59 |             temperature=temperature,
60 |         ):
61 |             yield answer
62 | 
63 |     def try_to_import_special_deps(self, **kwargs):
64 |         # import something that will raise error if the user does not install requirement_*.txt
65 |         # 🏃‍♂️🏃‍♂️🏃‍♂️ runs in the child process
66 |         pass
67 | 
68 | 
69 | # ------------------------------------------------------------------------------------------------------------------------
70 | # 🔌💻 GPT-Academic Interface
71 | # ------------------------------------------------------------------------------------------------------------------------
72 | predict_no_ui_long_connection, predict = get_local_llm_predict_fns(GetONNXGLMHandle, model_name)


--------------------------------------------------------------------------------
/request_llms/bridge_qwen.py:
--------------------------------------------------------------------------------
 1 | import time
 2 | import os
 3 | from toolbox import update_ui, get_conf, update_ui_latest_msg
 4 | from toolbox import check_packages, report_exception, log_chat
 5 | 
 6 | model_name = 'Qwen'
 7 | 
 8 | def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="",
 9 |                                   observe_window:list=[], console_silence:bool=False):
10 |     """
11 |         ⭐ multi-threaded method
12 |         see request_llms/bridge_all.py for a description of this function
13 |     """
14 |     watch_dog_patience = 5
15 |     response = ""
16 | 
17 |     from .com_qwenapi import QwenRequestInstance
18 |     sri = QwenRequestInstance()
19 |     for response in sri.generate(inputs, llm_kwargs, history, sys_prompt):
20 |         if len(observe_window) >= 1:
21 |             observe_window[0] = response
22 |         if len(observe_window) >= 2:
23 |             if (time.time()-observe_window[1]) > watch_dog_patience: raise RuntimeError("程序终止。")
24 |     return response
25 | 
26 | def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
27 |     """
28 |         ⭐ single-threaded method
29 |         see request_llms/bridge_all.py for a description of this function
30 |     """
31 |     chatbot.append((inputs, ""))
32 |     yield from update_ui(chatbot=chatbot, history=history)
33 | 
34 |     # Try importing the dependency; if it is missing, suggest how to install it
35 |     try:
36 |         check_packages(["dashscope"])
37 |     except:
38 |         yield from update_ui_latest_msg(f"导入软件依赖失败。使用该模型需要额外依赖,安装方法```pip install --upgrade dashscope```。",
39 |                                          chatbot=chatbot, history=history, delay=0)
40 |         return
41 | 
42 |     # Check DASHSCOPE_API_KEY
43 |     if get_conf("DASHSCOPE_API_KEY") == "":
44 |         yield from update_ui_latest_msg(f"请配置 DASHSCOPE_API_KEY。",
45 |                                          chatbot=chatbot, history=history, delay=0)
46 |         return
47 | 
48 |     if additional_fn is not None:
49 |         from core_functional import handle_core_functionality
50 |         inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)
51 |         chatbot[-1] = (inputs, "")
52 |         yield from update_ui(chatbot=chatbot, history=history)
53 | 
54 |     # Start receiving the reply
55 |     from .com_qwenapi import QwenRequestInstance
56 |     sri = QwenRequestInstance()
57 |     response = f"[Local Message] 等待{model_name}响应中 ..."
58 |     for response in sri.generate(inputs, llm_kwargs, history, system_prompt):
59 |         chatbot[-1] = (inputs, response)
60 |         yield from update_ui(chatbot=chatbot, history=history)
61 | 
62 |     log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=response)
63 |     # Wrap up the output
64 |     if response == f"[Local Message] 等待{model_name}响应中 ...":
65 |         response = f"[Local Message] {model_name}响应异常 ..."
66 |     history.extend([inputs, response])
67 |     yield from update_ui(chatbot=chatbot, history=history)


--------------------------------------------------------------------------------
/request_llms/bridge_qwen_local.py:
--------------------------------------------------------------------------------
 1 | model_name = "Qwen_Local"
 2 | cmd_to_install = "`pip install -r request_llms/requirements_qwen_local.txt`"
 3 | 
 4 | from toolbox import ProxyNetworkActivate, get_conf
 5 | from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
 6 | 
 7 | 
 8 | 
 9 | # ------------------------------------------------------------------------------------------------------------------------
10 | # 🔌💻 Local Model
11 | # ------------------------------------------------------------------------------------------------------------------------
12 | class GetQwenLMHandle(LocalLLMHandle):
13 | 
14 |     def load_model_info(self):
15 |         # 🏃‍♂️🏃‍♂️🏃‍♂️ runs in the child process
16 |         self.model_name = model_name
17 |         self.cmd_to_install = cmd_to_install
18 | 
19 |     def load_model_and_tokenizer(self):
20 |         # 🏃‍♂️🏃‍♂️🏃‍♂️ runs in the child process
21 |         # from modelscope import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
22 |         from transformers import AutoModelForCausalLM, AutoTokenizer
23 |         from transformers.generation import GenerationConfig
24 |         with ProxyNetworkActivate('Download_LLM'):
25 |             model_id = get_conf('QWEN_LOCAL_MODEL_SELECTION')
26 |             self._tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True, resume_download=True)
27 |             # use fp16
28 |             model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", trust_remote_code=True).eval()
29 |             model.generation_config = GenerationConfig.from_pretrained(model_id, trust_remote_code=True)  # generation length, top_p and other hyper-parameters can be customized here
30 |             self._model = model
31 | 
32 |         return self._model, self._tokenizer
33 | 
34 |     def llm_stream_generator(self, **kwargs):
35 |         # 🏃‍♂️🏃‍♂️🏃‍♂️ runs in the child process
36 |         def adaptor(kwargs):
37 |             query = kwargs['query']
38 |             max_length = kwargs['max_length']
39 |             top_p = kwargs['top_p']
40 |             temperature = kwargs['temperature']
41 |             history = kwargs['history']
42 |             return query, max_length, top_p, temperature, history
43 | 
44 |         query, max_length, top_p, temperature, history = adaptor(kwargs)
45 | 
46 |         for response in self._model.chat_stream(self._tokenizer, query, history=history):
47 |             yield response
48 | 
49 |     def try_to_import_special_deps(self, **kwargs):
50 |         # import something that will raise error if the user does not install requirement_*.txt
51 |         # 🏃‍♂️🏃‍♂️🏃‍♂️ runs in the main process
52 |         import importlib
53 |         importlib.import_module('modelscope')
54 | 
55 | 
56 | # ------------------------------------------------------------------------------------------------------------------------
57 | # 🔌💻 GPT-Academic Interface
58 | # ------------------------------------------------------------------------------------------------------------------------
59 | predict_no_ui_long_connection, predict = get_local_llm_predict_fns(GetQwenLMHandle, model_name)


--------------------------------------------------------------------------------
/request_llms/bridge_skylark2.py:
--------------------------------------------------------------------------------
 1 | import time
 2 | from toolbox import update_ui, get_conf, update_ui_latest_msg
 3 | from toolbox import check_packages, report_exception
 4 | 
 5 | model_name = '云雀大模型'
 6 | 
 7 | def validate_key():
 8 |     YUNQUE_SECRET_KEY = get_conf("YUNQUE_SECRET_KEY")
 9 |     if YUNQUE_SECRET_KEY == '': return False
10 |     return True
11 | 
12 | def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="",
13 |                                   observe_window:list=[], console_silence:bool=False):
14 |     """
15 |         ⭐ multi-threaded method
16 |         see request_llms/bridge_all.py for a description of this function
17 |     """
18 |     watch_dog_patience = 5
19 |     response = ""
20 | 
21 |     if validate_key() is False:
22 |         raise RuntimeError('请配置YUNQUE_SECRET_KEY')
23 | 
24 |     from .com_skylark2api import YUNQUERequestInstance
25 |     sri = YUNQUERequestInstance()
26 |     for response in sri.generate(inputs, llm_kwargs, history, sys_prompt):
27 |         if len(observe_window) >= 1:
28 |             observe_window[0] = response
29 |         if len(observe_window) >= 2:
30 |             if (time.time()-observe_window[1]) > watch_dog_patience: raise RuntimeError("程序终止。")
31 |     return response
32 | 
33 | def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
34 |     """
35 |         ⭐ single-threaded method
36 |         see request_llms/bridge_all.py for a description of this function
37 |     """
38 |     chatbot.append((inputs, ""))
39 |     yield from update_ui(chatbot=chatbot, history=history)
40 | 
41 |     # Try importing the dependency; if it is missing, suggest how to install it
42 |     try:
43 |         check_packages(["zhipuai"])
44 |     except:
45 |         yield from update_ui_latest_msg(f"导入软件依赖失败。使用该模型需要额外依赖,安装方法```pip install --upgrade zhipuai```。",
46 |                                          chatbot=chatbot, history=history, delay=0)
47 |         return
48 | 
49 |     if validate_key() is False:
50 |         yield from update_ui_latest_msg(lastmsg="[Local Message] 请配置YUNQUE_SECRET_KEY", chatbot=chatbot, history=history, delay=0)
51 |         return
52 | 
53 |     if additional_fn is not None:
54 |         from core_functional import handle_core_functionality
55 |         inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)
56 | 
57 |     # Start receiving the reply
58 |     from .com_skylark2api import YUNQUERequestInstance
59 |     sri = YUNQUERequestInstance()
60 |     response = f"[Local Message] 等待{model_name}响应中 ..."
61 |     for response in sri.generate(inputs, llm_kwargs, history, system_prompt):
62 |         chatbot[-1] = (inputs, response)
63 |         yield from update_ui(chatbot=chatbot, history=history)
64 | 
65 |     # Wrap up the output
66 |     if response == f"[Local Message] 等待{model_name}响应中 ...":
67 |         response = f"[Local Message] {model_name}响应异常 ..."
68 |     history.extend([inputs, response])
69 |     yield from update_ui(chatbot=chatbot, history=history)


--------------------------------------------------------------------------------
/request_llms/bridge_spark.py:
--------------------------------------------------------------------------------
 1 | 
 2 | import time
 3 | import threading
 4 | import importlib
 5 | from toolbox import update_ui, get_conf, update_ui_latest_msg
 6 | from multiprocessing import Process, Pipe
 7 | 
 8 | model_name = '星火认知大模型'
 9 | 
10 | def validate_key():
11 |     XFYUN_APPID = get_conf('XFYUN_APPID')
12 |     if XFYUN_APPID == '00000000' or XFYUN_APPID == '':
13 |         return False
14 |     return True
15 | 
16 | def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="",
17 |                                   observe_window:list=[], console_silence:bool=False):
18 |     """
19 |         ⭐ multi-threaded method
20 |         see request_llms/bridge_all.py for a description of this function
21 |     """
22 |     watch_dog_patience = 5
23 |     response = ""
24 | 
25 |     if validate_key() is False:
26 |         raise RuntimeError('请配置讯飞星火大模型的XFYUN_APPID, XFYUN_API_KEY, XFYUN_API_SECRET')
27 | 
28 |     from .com_sparkapi import SparkRequestInstance
29 |     sri = SparkRequestInstance()
30 |     for response in sri.generate(inputs, llm_kwargs, history, sys_prompt, use_image_api=False):
31 |         if len(observe_window) >= 1:
32 |             observe_window[0] = response
33 |         if len(observe_window) >= 2:
34 |             if (time.time()-observe_window[1]) > watch_dog_patience: raise RuntimeError("程序终止。")
35 |     return response
36 | 
37 | def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
38 |     """
39 |         ⭐ single-threaded method
40 |         see request_llms/bridge_all.py for a description of this function
41 |     """
42 |     chatbot.append((inputs, ""))
43 |     yield from update_ui(chatbot=chatbot, history=history)
44 | 
45 |     if validate_key() is False:
46 |         yield from update_ui_latest_msg(lastmsg="[Local Message] 请配置讯飞星火大模型的XFYUN_APPID, XFYUN_API_KEY, XFYUN_API_SECRET", chatbot=chatbot, history=history, delay=0)
47 |         return
48 | 
49 |     if additional_fn is not None:
50 |         from core_functional import handle_core_functionality
51 |         inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)
52 | 
53 |     # Start receiving the reply
54 |     from .com_sparkapi import SparkRequestInstance
55 |     sri = SparkRequestInstance()
56 |     response = f"[Local Message] 等待{model_name}响应中 ..."
57 |     for response in sri.generate(inputs, llm_kwargs, history, system_prompt, use_image_api=True):
58 |         chatbot[-1] = (inputs, response)
59 |         yield from update_ui(chatbot=chatbot, history=history)
60 | 
61 |     # Wrap up the output
62 |     if response == f"[Local Message] 等待{model_name}响应中 ...":
63 |         response = f"[Local Message] {model_name}响应异常 ..."
64 |     history.extend([inputs, response])
65 |     yield from update_ui(chatbot=chatbot, history=history)


--------------------------------------------------------------------------------
/request_llms/bridge_taichu.py:
--------------------------------------------------------------------------------
 1 | import time
 2 | import os
 3 | from toolbox import update_ui, get_conf, update_ui_latest_msg, log_chat
 4 | from toolbox import check_packages, report_exception, have_any_recent_upload_image_files
 5 | from toolbox import ChatBotWithCookies
 6 | 
 7 | # model_name = 'Taichu-2.0'
 8 | # taichu_default_model = 'taichu_llm'
 9 | 
10 | def validate_key():
11 |     TAICHU_API_KEY = get_conf("TAICHU_API_KEY")
12 |     if TAICHU_API_KEY == '': return False
13 |     return True
14 | 
15 | def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="",
16 |                                   observe_window:list=[], console_silence:bool=False):
17 |     """
18 |         ⭐ multi-threaded method
19 |         see request_llms/bridge_all.py for a description of this function
20 |     """
21 |     watch_dog_patience = 5
22 |     response = ""
23 | 
24 |     # if llm_kwargs["llm_model"] == "taichu":
25 |     #     llm_kwargs["llm_model"] = "taichu"
26 | 
27 |     if validate_key() is False:
28 |         raise RuntimeError('请配置 TAICHU_API_KEY')
29 | 
30 |     # Start receiving the reply
31 |     from .com_taichu import TaichuChatInit
32 |     zhipu_bro_init = TaichuChatInit()
33 |     for chunk, response in zhipu_bro_init.generate_chat(inputs, llm_kwargs, history, sys_prompt):
34 |         if len(observe_window) >= 1:
35 |             observe_window[0] = response
36 |         if len(observe_window) >= 2:
37 |             if (time.time() - observe_window[1]) > watch_dog_patience:
38 |                 raise RuntimeError("程序终止。")
39 |     return response
40 | 
41 | 
42 | def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWithCookies,
43 |             history:list=[], system_prompt:str='', stream:bool=True, additional_fn:str=None):
44 |     """
45 |         ⭐ single-threaded method
46 |         see request_llms/bridge_all.py for a description of this function
47 |     """
48 |     chatbot.append([inputs, ""])
49 |     yield from update_ui(chatbot=chatbot, history=history)
50 | 
51 |     if validate_key() is False:
52 |         yield from update_ui_latest_msg(lastmsg="[Local Message] 请配置TAICHU_API_KEY", chatbot=chatbot, history=history, delay=0)
53 |         return
54 | 
55 |     if additional_fn is not None:
56 |         from core_functional import handle_core_functionality
57 |         inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)
58 |         chatbot[-1] = [inputs, ""]
59 |         yield from update_ui(chatbot=chatbot, history=history)
60 | 
61 |     # if llm_kwargs["llm_model"] == "taichu":
62 |     #     llm_kwargs["llm_model"] = taichu_default_model
63 | 
64 |     # Start receiving the reply
65 |     from .com_taichu import TaichuChatInit
66 |     zhipu_bro_init = TaichuChatInit()
67 |     for chunk, response in zhipu_bro_init.generate_chat(inputs, llm_kwargs, history, system_prompt):
68 |         chatbot[-1] = [inputs, response]
69 |         yield from update_ui(chatbot=chatbot, history=history)
70 |     history.extend([inputs, response])
71 |     log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=response)
72 |     yield from update_ui(chatbot=chatbot, history=history)


--------------------------------------------------------------------------------
/request_llms/com_taichu.py:
--------------------------------------------------------------------------------
 1 | # encoding: utf-8
 2 | # @Time   : 2024/1/22
 3 | # @Author : Kilig947 & binary husky
 4 | # @Descr   : Compatible with the latest Zhipu AI API
 5 | from toolbox import get_conf
 6 | from toolbox import get_conf, encode_image, get_pictures_list
 7 | import requests
 8 | import json
 9 | class TaichuChatInit:
10 |     def __init__(self): ...
11 | 
12 |     def __conversation_user(self, user_input: str, llm_kwargs:dict):
13 |         return {"role": "user", "content": user_input}
14 | 
15 |     def __conversation_history(self, history:list, llm_kwargs:dict):
16 |         messages = []
17 |         conversation_cnt = len(history) // 2
18 |         if conversation_cnt:
19 |             for index in range(0, 2 * conversation_cnt, 2):
20 |                 what_i_have_asked = self.__conversation_user(history[index], llm_kwargs)
21 |                 what_gpt_answer = {
22 |                     "role": "assistant",
23 |                     "content": history[index + 1]
24 |                 }
25 |                 messages.append(what_i_have_asked)
26 |                 messages.append(what_gpt_answer)
27 |         return messages
28 | 
29 |     def generate_chat(self, inputs:str, llm_kwargs:dict, history:list, system_prompt:str):
30 |         TAICHU_API_KEY = get_conf("TAICHU_API_KEY")
31 |         params = {
32 |             'api_key': TAICHU_API_KEY,
33 |             'model_code': 'taichu_llm',
34 |             'question': '\n\n'.join(history) + inputs,
35 |             'prefix': system_prompt,
36 |             'temperature': llm_kwargs.get('temperature', 0.95),
37 |             'stream_format': 'json'
38 |         }
39 | 
40 |         api = 'https://ai-maas.wair.ac.cn/maas/v1/model_api/invoke'
41 |         response = requests.post(api, json=params, stream=True)
42 |         results = ""
43 |         if response.status_code == 200:
44 |             response.encoding = 'utf-8'
45 |             for line in response.iter_lines(decode_unicode=True):
46 |                 try: delta = json.loads(line)['data']['content']
47 |                 except: delta = json.loads(line)['choices'][0]['text']
48 |                 results += delta
49 |                 yield delta, results
50 |         else:
51 |             raise ValueError
52 | 
53 | 
54 | if __name__ == '__main__':
55 |     zhipu = TaichuChatInit()
56 |     zhipu.generate_chat('你好', {'llm_model': 'glm-4'}, [], '你是WPSAi')
57 | 


--------------------------------------------------------------------------------
/request_llms/embed_models/bridge_all_embed.py:
--------------------------------------------------------------------------------
 1 | import tiktoken, copy, re
 2 | from functools import lru_cache
 3 | from concurrent.futures import ThreadPoolExecutor
 4 | from toolbox import get_conf, trimmed_format_exc, apply_gpt_academic_string_mask, read_one_api_model_name
 5 | 
 6 | # Endpoint redirection
 7 | API_URL_REDIRECT, AZURE_ENDPOINT, AZURE_ENGINE = get_conf("API_URL_REDIRECT", "AZURE_ENDPOINT", "AZURE_ENGINE")
 8 | openai_endpoint = "https://api.openai.com/v1/chat/completions"
 9 | if not AZURE_ENDPOINT.endswith('/'): AZURE_ENDPOINT += '/'
10 | azure_endpoint = AZURE_ENDPOINT + f'openai/deployments/{AZURE_ENGINE}/chat/completions?api-version=2023-05-15'
11 | 
12 | 
13 | if openai_endpoint in API_URL_REDIRECT: openai_endpoint = API_URL_REDIRECT[openai_endpoint]
14 | 
15 | openai_embed_endpoint = openai_endpoint.replace("chat/completions", "embeddings")
16 | 
17 | from .openai_embed import OpenAiEmbeddingModel
18 | 
19 | embed_model_info = {
20 |     # text-embedding-3-small    Increased performance over 2nd generation ada embedding model  |  1,536
21 |     "text-embedding-3-small": {
22 |         "embed_class": OpenAiEmbeddingModel,
23 |         "embed_endpoint": openai_embed_endpoint,
24 |         "embed_dimension": 1536,
25 |     },
26 | 
27 |     # text-embedding-3-large    Most capable embedding model for both english and non-english tasks  |   3,072
28 |     "text-embedding-3-large": {
29 |         "embed_class": OpenAiEmbeddingModel,
30 |         "embed_endpoint": openai_embed_endpoint,
31 |         "embed_dimension": 3072,
32 |     },
33 | 
34 |     # text-embedding-ada-002    Most capable 2nd generation embedding model, replacing 16 first generation models   |  1,536
35 |     "text-embedding-ada-002": {
36 |         "embed_class": OpenAiEmbeddingModel,
37 |         "embed_endpoint": openai_embed_endpoint,
38 |         "embed_dimension": 1536,
39 |     },
40 | }
41 | 


--------------------------------------------------------------------------------
/request_llms/embed_models/openai_embed.py:
--------------------------------------------------------------------------------
 1 | from llama_index.embeddings.openai import OpenAIEmbedding
 2 | from openai import OpenAI
 3 | from toolbox import get_conf
 4 | from toolbox import CatchException, update_ui, get_conf, select_api_key, get_log_folder, ProxyNetworkActivate
 5 | from shared_utils.key_pattern_manager import select_api_key_for_embed_models
 6 | from typing import List, Any
 7 | 
 8 | import numpy as np
 9 | 
10 | def mean_agg(embeddings):
11 |     """Mean aggregation for embeddings."""
12 |     return np.array(embeddings).mean(axis=0).tolist()
13 | 
14 | class EmbeddingModel():
15 | 
16 |     def get_agg_embedding_from_queries(
17 |         self,
18 |         queries: List[str],
19 |         agg_fn = None,
20 |     ):
21 |         """Get aggregated embedding from multiple queries."""
22 |         query_embeddings = [self.get_query_embedding(query) for query in queries]
23 |         agg_fn = agg_fn or mean_agg
24 |         return agg_fn(query_embeddings)
25 | 
26 |     def get_text_embedding_batch(
27 |         self,
28 |         texts: List[str],
29 |         show_progress: bool = False,
30 |     ):
31 |         return self.compute_embedding(texts, batch_mode=True)
32 | 
33 | 
34 | class OpenAiEmbeddingModel(EmbeddingModel):
35 | 
36 |     def __init__(self, llm_kwargs:dict=None):
37 |         self.llm_kwargs = llm_kwargs
38 | 
39 |     def get_query_embedding(self, query: str):
40 |         return self.compute_embedding(query)
41 | 
42 |     def compute_embedding(self, text="这是要计算嵌入的文本", llm_kwargs:dict=None, batch_mode=False):
43 |         from .bridge_all_embed import embed_model_info
44 | 
45 |         # load kwargs
46 |         if llm_kwargs is None:
47 |             llm_kwargs = self.llm_kwargs
48 |         if llm_kwargs is None:
49 |             raise RuntimeError("llm_kwargs is not provided!")
50 | 
51 |         # setup api and req url
52 |         api_key = select_api_key_for_embed_models(llm_kwargs['api_key'], llm_kwargs['embed_model'])
53 |         embed_model = llm_kwargs['embed_model']
54 |         base_url = embed_model_info[llm_kwargs['embed_model']]['embed_endpoint'].replace('embeddings', '')
55 | 
56 |         # send and compute
57 |         with ProxyNetworkActivate("Connect_OpenAI_Embedding"):
58 |             self.oai_client = OpenAI(api_key=api_key, base_url=base_url)
59 |             if batch_mode:
60 |                 input = text
61 |                 assert isinstance(text, list)
62 |             else:
63 |                 input = [text]
64 |                 assert isinstance(text, str)
65 |             res = self.oai_client.embeddings.create(input=input, model=embed_model)
66 | 
67 |         # parse result
68 |         if batch_mode:
69 |             embedding = [d.embedding for d in res.data]
70 |         else:
71 |             embedding = res.data[0].embedding
72 |         return embedding
73 | 
74 |     def embedding_dimension(self, llm_kwargs=None):
75 |         # load kwargs
76 |         if llm_kwargs is None:
77 |             llm_kwargs = self.llm_kwargs
78 |         if llm_kwargs is None:
79 |             raise RuntimeError("llm_kwargs is not provided!")
80 | 
81 |         from .bridge_all_embed import embed_model_info
82 |         return embed_model_info[llm_kwargs['embed_model']]['embed_dimension']
83 | 
84 | if __name__ == "__main__":
85 |     pass
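
A usage sketch for the model class above (the key is a placeholder; `embed_model` must be one of the entries registered in `bridge_all_embed.py`):

```python
# Hypothetical usage of OpenAiEmbeddingModel.
llm_kwargs = {"api_key": "sk-placeholder", "embed_model": "text-embedding-3-small"}
model = OpenAiEmbeddingModel(llm_kwargs=llm_kwargs)
vec = model.get_query_embedding("hello world")       # single query -> one vector
vecs = model.get_text_embedding_batch(["a", "b"])    # batch mode -> list of vectors
assert len(vec) == model.embedding_dimension()       # 1536 for text-embedding-3-small
```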


--------------------------------------------------------------------------------
/request_llms/key_manager.py:
--------------------------------------------------------------------------------
 1 | import random
 2 | 
 3 | def Singleton(cls):
 4 |     _instance = {}
 5 | 
 6 |     def _singleton(*args, **kwargs):
 7 |         if cls not in _instance:
 8 |             _instance[cls] = cls(*args, **kwargs)
 9 |         return _instance[cls]
10 | 
11 |     return _singleton
12 | 
13 | 
14 | @Singleton
15 | class OpenAI_ApiKeyManager():
16 |     def __init__(self, mode='blacklist') -> None:
17 |         # self.key_avail_list = []
18 |         self.key_black_list = []
19 | 
20 |     def add_key_to_blacklist(self, key):
21 |         self.key_black_list.append(key)
22 | 
23 |     def select_avail_key(self, key_list):
24 |         # select a key from key_list, avoiding keys in self.key_black_list; raise if none is available
25 |         available_keys = [key for key in key_list if key not in self.key_black_list]
26 |         if not available_keys:
27 |             raise KeyError("No available key found.")
28 |         selected_key = random.choice(available_keys)
29 |         return selected_key
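
A minimal usage sketch: because of the Singleton decorator, every call to OpenAI_ApiKeyManager() returns the same instance, so a blacklisted key stays blacklisted process-wide (key values here are placeholders):

    manager = OpenAI_ApiKeyManager()
    manager.add_key_to_blacklist("sk-revoked")
    assert OpenAI_ApiKeyManager() is manager                      # singleton: same instance
    key = manager.select_avail_key(["sk-revoked", "sk-alive"])    # -> "sk-alive"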


--------------------------------------------------------------------------------
/request_llms/queued_pipe.py:
--------------------------------------------------------------------------------
 1 | from multiprocessing import Pipe, Queue
 2 | import time
 3 | import threading
 4 | 
 5 | class PipeSide(object):
 6 |     def __init__(self, q_2remote, q_2local) -> None:
 7 |         self.q_2remote = q_2remote
 8 |         self.q_2local = q_2local
 9 | 
10 |     def recv(self):
11 |         return self.q_2local.get()
12 | 
13 |     def send(self, buf):
14 |         self.q_2remote.put(buf)
15 | 
16 |     def poll(self):
17 |         return not self.q_2local.empty()
18 | 
19 | def create_queue_pipe():
20 |     q_p2c = Queue()
21 |     q_c2p = Queue()
22 |     pipe_c = PipeSide(q_2local=q_p2c, q_2remote=q_c2p)
23 |     pipe_p = PipeSide(q_2local=q_c2p, q_2remote=q_p2c)
24 |     return pipe_c, pipe_p
25 | 
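
A minimal usage sketch: the two PipeSide halves are cross-wired over a pair of multiprocessing Queues, so they can be handed to different processes and used like a duplex Pipe:

    import multiprocessing
    from request_llms.queued_pipe import create_queue_pipe

    def worker(pipe):
        pipe.send(pipe.recv().upper())     # echo the message back, upper-cased

    if __name__ == "__main__":
        pipe_child, pipe_parent = create_queue_pipe()
        p = multiprocessing.Process(target=worker, args=(pipe_child,))
        p.start()
        pipe_parent.send("ping")
        print(pipe_parent.recv())          # -> "PING"
        p.join()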


--------------------------------------------------------------------------------
/request_llms/requirements_chatglm.txt:
--------------------------------------------------------------------------------
1 | protobuf
2 | cpm_kernels
3 | torch>=1.10
4 | mdtex2html
5 | sentencepiece
6 | 


--------------------------------------------------------------------------------
/request_llms/requirements_chatglm4.txt:
--------------------------------------------------------------------------------
1 | protobuf
2 | cpm_kernels
3 | torch>=1.10
4 | transformers>=4.44
5 | mdtex2html
6 | sentencepiece
7 | accelerate


--------------------------------------------------------------------------------
/request_llms/requirements_chatglm_onnx.txt:
--------------------------------------------------------------------------------
1 | protobuf
2 | cpm_kernels
3 | torch>=1.10
4 | mdtex2html
5 | sentencepiece
6 | numpy
7 | onnxruntime
8 | 


--------------------------------------------------------------------------------
/request_llms/requirements_jittorllms.txt:
--------------------------------------------------------------------------------
1 | jittor >= 1.3.7.9
2 | jtorch >= 0.1.3
3 | torch
4 | torchvision
5 | pandas
6 | jieba
7 | 


--------------------------------------------------------------------------------
/request_llms/requirements_moss.txt:
--------------------------------------------------------------------------------
1 | torch
2 | sentencepiece
3 | datasets
4 | accelerate
5 | matplotlib
6 | huggingface_hub
7 | triton
8 | 


--------------------------------------------------------------------------------
/request_llms/requirements_newbing.txt:
--------------------------------------------------------------------------------
1 | BingImageCreator
2 | certifi
3 | httpx
4 | prompt_toolkit
5 | requests
6 | rich
7 | websockets
8 | httpx[socks]
9 | 


--------------------------------------------------------------------------------
/request_llms/requirements_qwen.txt:
--------------------------------------------------------------------------------
1 | dashscope
2 | 


--------------------------------------------------------------------------------
/request_llms/requirements_qwen_local.txt:
--------------------------------------------------------------------------------
1 | modelscope
2 | transformers_stream_generator
3 | auto-gptq
4 | optimum
5 | urllib3<2
6 | 


--------------------------------------------------------------------------------
/request_llms/requirements_slackclaude.txt:
--------------------------------------------------------------------------------
1 | slack-sdk==3.21.3
2 | 


--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
 1 | https://public.agent-matrix.com/publish/gradio-3.32.15-py3-none-any.whl
 2 | fastapi==0.110
 3 | gradio-client==0.8
 4 | pypdf2==2.12.1
 5 | httpx<=0.25.2
 6 | zhipuai==2.0.1
 7 | tiktoken>=0.3.3
 8 | requests[socks]
 9 | pydantic==2.9.2
10 | protobuf==3.20
11 | transformers>=4.27.1,<4.42
12 | scipdf_parser>=0.52
13 | spacy==3.7.4
14 | anthropic>=0.18.1
15 | python-markdown-math
16 | pymdown-extensions>=10.14
17 | websocket-client
18 | beautifulsoup4
19 | prompt_toolkit
20 | latex2mathml
21 | python-docx
22 | mdtex2html
23 | dashscope
24 | pyautogen
25 | colorama
26 | Markdown
27 | pygments
28 | edge-tts>=7.0.0
29 | pymupdf
30 | openai
31 | rjsmin
32 | loguru
33 | arxiv
34 | numpy
35 | rich
36 | 
37 | 
38 | llama-index-core==0.10.68
39 | llama-index-legacy==0.9.48
40 | llama-index-readers-file==0.1.33
41 | llama-index-readers-llama-parse==0.1.6
42 | llama-index-embeddings-azure-openai==0.1.10
43 | llama-index-embeddings-openai==0.1.10
44 | llama-parse==0.4.9
45 | mdit-py-plugins>=0.3.3
46 | linkify-it-py==2.0.3


--------------------------------------------------------------------------------
/shared_utils/char_visual_effect.py:
--------------------------------------------------------------------------------
 1 | def is_full_width_char(ch):
 2 |     """Determine whether a single given character is full-width."""
 3 |     if '\u4e00' <= ch <= '\u9fff':
 4 |         return True  # Chinese characters
 5 |     if '\uff01' <= ch <= '\uff5e':
 6 |         return True  # full-width symbols
 7 |     if '\u3000' <= ch <= '\u303f':
 8 |         return True  # CJK punctuation
 9 |     return False
10 | 
11 | def scrolling_visual_effect(text, scroller_max_len):
12 |     text = text.\
13 |             replace('\n', '').replace('`', '.').replace(' ', '.').replace('<br/>', '.....').replace('$', '.')
14 |     place_take_cnt = 0
15 |     pointer = len(text) - 1
16 | 
17 |     if len(text) < scroller_max_len:
18 |         return text
19 | 
20 |     while place_take_cnt < scroller_max_len and pointer > 0:
21 |         if is_full_width_char(text[pointer]): place_take_cnt += 2
22 |         else: place_take_cnt += 1
23 |         pointer -= 1
24 | 
25 |     return text[pointer:]
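
A worked example: full-width characters occupy two display columns, so the function walks back from the tail of the string until roughly scroller_max_len columns are consumed:

    scrolling_visual_effect("hello世界", 4)   # -> "o世界" ('世' and '界' each count as 2 columns)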


--------------------------------------------------------------------------------
/shared_utils/colorful.py:
--------------------------------------------------------------------------------
 1 | import platform
 2 | from sys import stdout
 3 | from loguru import logger
 4 | 
 5 | if platform.system()=="Linux":
 6 |     pass
 7 | else:
 8 |     from colorama import init
 9 |     init()
10 | 
11 | # Do you like the elegance of Chinese characters?
12 | def print红(*kw,**kargs):
13 |     print("\033[0;31m",*kw,"\033[0m",**kargs)
14 | def print绿(*kw,**kargs):
15 |     print("\033[0;32m",*kw,"\033[0m",**kargs)
16 | def print黄(*kw,**kargs):
17 |     print("\033[0;33m",*kw,"\033[0m",**kargs)
18 | def print蓝(*kw,**kargs):
19 |     print("\033[0;34m",*kw,"\033[0m",**kargs)
20 | def print紫(*kw,**kargs):
21 |     print("\033[0;35m",*kw,"\033[0m",**kargs)
22 | def print靛(*kw,**kargs):
23 |     print("\033[0;36m",*kw,"\033[0m",**kargs)
24 | 
25 | def print亮红(*kw,**kargs):
26 |     print("\033[1;31m",*kw,"\033[0m",**kargs)
27 | def print亮绿(*kw,**kargs):
28 |     print("\033[1;32m",*kw,"\033[0m",**kargs)
29 | def print亮黄(*kw,**kargs):
30 |     print("\033[1;33m",*kw,"\033[0m",**kargs)
31 | def print亮蓝(*kw,**kargs):
32 |     print("\033[1;34m",*kw,"\033[0m",**kargs)
33 | def print亮紫(*kw,**kargs):
34 |     print("\033[1;35m",*kw,"\033[0m",**kargs)
35 | def print亮靛(*kw,**kargs):
36 |     print("\033[1;36m",*kw,"\033[0m",**kargs)
37 | 
38 | # Do you like the elegance of Chinese characters?
39 | def sprint红(*kw):
40 |     return "\033[0;31m"+' '.join(kw)+"\033[0m"
41 | def sprint绿(*kw):
42 |     return "\033[0;32m"+' '.join(kw)+"\033[0m"
43 | def sprint黄(*kw):
44 |     return "\033[0;33m"+' '.join(kw)+"\033[0m"
45 | def sprint蓝(*kw):
46 |     return "\033[0;34m"+' '.join(kw)+"\033[0m"
47 | def sprint紫(*kw):
48 |     return "\033[0;35m"+' '.join(kw)+"\033[0m"
49 | def sprint靛(*kw):
50 |     return "\033[0;36m"+' '.join(kw)+"\033[0m"
51 | def sprint亮红(*kw):
52 |     return "\033[1;31m"+' '.join(kw)+"\033[0m"
53 | def sprint亮绿(*kw):
54 |     return "\033[1;32m"+' '.join(kw)+"\033[0m"
55 | def sprint亮黄(*kw):
56 |     return "\033[1;33m"+' '.join(kw)+"\033[0m"
57 | def sprint亮蓝(*kw):
58 |     return "\033[1;34m"+' '.join(kw)+"\033[0m"
59 | def sprint亮紫(*kw):
60 |     return "\033[1;35m"+' '.join(kw)+"\033[0m"
61 | def sprint亮靛(*kw):
62 |     return "\033[1;36m"+' '.join(kw)+"\033[0m"
63 | 
64 | def log红(*kw,**kargs):
65 |     logger.opt(depth=1).info(sprint红(*kw))
66 | def log绿(*kw,**kargs):
67 |     logger.opt(depth=1).info(sprint绿(*kw))
68 | def log黄(*kw,**kargs):
69 |     logger.opt(depth=1).info(sprint黄(*kw))
70 | def log蓝(*kw,**kargs):
71 |     logger.opt(depth=1).info(sprint蓝(*kw))
72 | def log紫(*kw,**kargs):
73 |     logger.opt(depth=1).info(sprint紫(*kw))
74 | def log靛(*kw,**kargs):
75 |     logger.opt(depth=1).info(sprint靛(*kw))
76 | 
77 | def log亮红(*kw,**kargs):
78 |     logger.opt(depth=1).info(sprint亮红(*kw))
79 | def log亮绿(*kw,**kargs):
80 |     logger.opt(depth=1).info(sprint亮绿(*kw))
81 | def log亮黄(*kw,**kargs):
82 |     logger.opt(depth=1).info(sprint亮黄(*kw))
83 | def log亮蓝(*kw,**kargs):
84 |     logger.opt(depth=1).info(sprint亮蓝(*kw))
85 | def log亮紫(*kw,**kargs):
86 |     logger.opt(depth=1).info(sprint亮紫(*kw))
87 | def log亮靛(*kw,**kargs):
88 |     logger.opt(depth=1).info(sprint亮靛(*kw))


--------------------------------------------------------------------------------
/shared_utils/connect_void_terminal.py:
--------------------------------------------------------------------------------
 1 | import os
 2 | 
 3 | """
 4 | =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
 5 | Connecting to void-terminal:
 6 |     - set_conf:                     dynamically modify a configuration item at runtime
 7 |     - set_multi_conf:               dynamically modify multiple configuration items at runtime
 8 |     - get_plugin_handle:            get the handle of a plugin
 9 |     - get_plugin_default_kwargs:    get a plugin's default arguments
10 |     - get_chat_handle:              get the handle for simple chat
11 |     - get_chat_default_kwargs:      get the default arguments for simple chat
12 | =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
13 | """
14 | 
15 | 
16 | def get_plugin_handle(plugin_name):
17 |     """
18 |     e.g. plugin_name = 'crazy_functions.Markdown_Translate->Markdown翻译指定语言'
19 |     """
20 |     import importlib
21 | 
22 |     assert (
23 |         "->" in plugin_name
24 |     ), "Example of plugin_name: crazy_functions.Markdown_Translate->Markdown翻译指定语言"
25 |     module, fn_name = plugin_name.split("->")
26 |     f_hot_reload = getattr(importlib.import_module(module), fn_name)
27 |     return f_hot_reload
28 | 
29 | 
30 | def get_chat_handle():
31 |     """
32 |     Get chat function
33 |     """
34 |     from request_llms.bridge_all import predict_no_ui_long_connection
35 | 
36 |     return predict_no_ui_long_connection
37 | 
38 | 
39 | def get_plugin_default_kwargs():
40 |     """
41 |     Get Plugin Default Arguments
42 |     """
43 |     from toolbox import ChatBotWithCookies, load_chat_cookies
44 | 
45 |     cookies = load_chat_cookies()
46 |     llm_kwargs = {
47 |         "api_key": cookies["api_key"],
48 |         "llm_model": cookies["llm_model"],
49 |         "top_p": 1.0,
50 |         "max_length": None,
51 |         "temperature": 1.0,
52 |     }
53 |     chatbot = ChatBotWithCookies(llm_kwargs)
54 | 
55 |     # txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request
56 |     DEFAULT_FN_GROUPS_kwargs = {
57 |         "main_input": "./README.md",
58 |         "llm_kwargs": llm_kwargs,
59 |         "plugin_kwargs": {},
60 |         "chatbot_with_cookie": chatbot,
61 |         "history": [],
62 |         "system_prompt": "You are a good AI.",
63 |         "user_request": None,
64 |     }
65 |     return DEFAULT_FN_GROUPS_kwargs
66 | 
67 | 
68 | def get_chat_default_kwargs():
69 |     """
70 |     Get Chat Default Arguments
71 |     """
72 |     from toolbox import load_chat_cookies
73 | 
74 |     cookies = load_chat_cookies()
75 |     llm_kwargs = {
76 |         "api_key": cookies["api_key"],
77 |         "llm_model": cookies["llm_model"],
78 |         "top_p": 1.0,
79 |         "max_length": None,
80 |         "temperature": 1.0,
81 |     }
82 |     default_chat_kwargs = {
83 |         "inputs": "Hello there, are you ready?",
84 |         "llm_kwargs": llm_kwargs,
85 |         "history": [],
86 |         "sys_prompt": "You are AI assistant",
87 |         "observe_window": None,
88 |         "console_silence": False,
89 |     }
90 | 
91 |     return default_chat_kwargs
92 | 
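
A minimal usage sketch: a handle obtained from get_plugin_handle is a generator yielding (cookies, chat, history, msg) tuples, mirroring how tests/test_utils.py drives plugins; running it requires the project root on sys.path and a configured API key:

    handle = get_plugin_handle("crazy_functions.Markdown_Translate->Markdown翻译指定语言")
    kwargs = get_plugin_default_kwargs()
    kwargs["main_input"] = "./README.md"
    for cookies, chat, history, msg in handle(**kwargs):
        print(msg)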


--------------------------------------------------------------------------------
/shared_utils/logging.py:
--------------------------------------------------------------------------------
 1 | from loguru import logger
 2 | import logging
 3 | import sys
 4 | import os
 5 | 
 6 | def chat_log_filter(record):
 7 |     return "chat_msg" in record["extra"]
 8 | 
 9 | def not_chat_log_filter(record):
10 |     return "chat_msg" not in record["extra"]
11 | 
12 | def formatter_with_clip(record):
13 |     # Note this function returns the string to be formatted, not the actual message to be logged
14 |     # record["extra"]["serialized"] = "555555"
15 |     max_len = 12
16 |     record['function_x'] = record['function'].center(max_len)
17 |     if len(record['function_x']) > max_len:
18 |         record['function_x'] = ".." + record['function_x'][-(max_len-2):]
19 |     record['line_x'] = str(record['line']).ljust(3)
20 |     return '<green>{time:HH:mm}</green> | <cyan>{function_x}</cyan>:<cyan>{line_x}</cyan> | <level>{message}</level>\n'
21 | 
22 | def setup_logging(PATH_LOGGING):
23 |     
24 |     admin_log_path = os.path.join(PATH_LOGGING, "admin")
25 |     os.makedirs(admin_log_path, exist_ok=True)
26 |     sensitive_log_path = os.path.join(admin_log_path, "chat_secrets.log")
27 |     regular_log_path = os.path.join(admin_log_path, "console_log.log")
28 |     logger.remove()
29 |     logger.configure(
30 |         levels=[dict(name="WARNING", color="<g>")],
31 |     )
32 | 
33 |     logger.add(
34 |         sys.stderr, 
35 |         format=formatter_with_clip,
36 |         # format='<green>{time:HH:mm}</green> | <cyan>{function}</cyan>:<cyan>{line}</cyan> - <level>{message}</level>',
37 |         filter=not_chat_log_filter,
38 |         colorize=True,
39 |         enqueue=True
40 |     )
41 | 
42 |     logger.add(
43 |         sensitive_log_path, 
44 |         format='<green>{time:MM-DD HH:mm:ss}</green> | <level>{level: <8}</level> | <cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan> - <level>{message}</level>',
45 |         rotation="10 MB",
46 |         filter=chat_log_filter,
47 |         enqueue=True, 
48 |     )
49 | 
50 |     logger.add(
51 |         regular_log_path, 
52 |         format='<green>{time:MM-DD HH:mm:ss}</green> | <level>{level: <8}</level> | <cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan> - <level>{message}</level>',
53 |         rotation="10 MB",
54 |         filter=not_chat_log_filter,
55 |         enqueue=True, 
56 |     )
57 | 
58 |     logging.getLogger("httpx").setLevel(logging.WARNING)
59 | 
60 |     logger.warning(f"所有对话记录将自动保存在本地目录{sensitive_log_path}, 请注意自我隐私保护哦!")
61 | 
62 | 
63 | # logger.bind(chat_msg=True).info("This message is logged to the file!")
64 | # logger.debug(f"debug message")
65 | # logger.info(f"info message")
66 | # logger.success(f"success message")
67 | # logger.error(f"error message")
68 | # logger.add("special.log", filter=lambda record: "special" in record["extra"])
69 | # logger.debug("This message is not logged to the file")
70 | 


--------------------------------------------------------------------------------
/shared_utils/map_names.py:
--------------------------------------------------------------------------------
 1 | import re
 2 | mapping_dic = {
 3 |     # "qianfan": "qianfan(文心一言大模型)",
 4 |     # "zhipuai": "zhipuai(智谱GLM4超级模型🔥)",
 5 |     # "gpt-4-1106-preview": "gpt-4-1106-preview(新调优版本GPT-4🔥)",
 6 |     # "gpt-4-vision-preview": "gpt-4-vision-preview(识图模型GPT-4V)",
 7 | }
 8 | 
 9 | rev_mapping_dic = {}
10 | for k, v in mapping_dic.items():
11 |     rev_mapping_dic[v] = k
12 | 
13 | def map_model_to_friendly_names(m):
14 |     if m in mapping_dic:
15 |         return mapping_dic[m]
16 |     return m
17 | 
18 | def map_friendly_names_to_model(m):
19 |     if m in rev_mapping_dic:
20 |         return rev_mapping_dic[m]
21 |     return m
22 | 
23 | def read_one_api_model_name(model: str):
24 |     """Return the real model name and its max_token limit.
25 |     """
26 |     max_token_pattern = r"\(max_token=(\d+)\)"
27 |     match = re.search(max_token_pattern, model)
28 |     if match:
29 |         max_token_tmp = match.group(1)  # extract the max_token value
30 |         max_token_tmp = int(max_token_tmp)
31 |         model = re.sub(max_token_pattern, "", model)  # strip "(max_token=...)" from the model string
32 |     else:
33 |         max_token_tmp = 4096
34 |     return model, max_token_tmp
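
Worked examples: the "(max_token=...)" suffix is this project's convention for annotating one-api model names, and 4096 is the fallback when the suffix is absent (the model name is a placeholder):

    read_one_api_model_name("some-model(max_token=32000)")   # -> ("some-model", 32000)
    read_one_api_model_name("some-model")                    # -> ("some-model", 4096)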


--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/binary-husky/gpt_academic/a7a56b5058fc8e69641e113f615aed8ab3a59a64/tests/__init__.py


--------------------------------------------------------------------------------
/tests/init_test.py:
--------------------------------------------------------------------------------
 1 | def validate_path():
 2 |     import os, sys
 3 | 
 4 |     os.path.dirname(__file__)
 5 |     root_dir_assume = os.path.abspath(os.path.dirname(__file__) + "/..")
 6 |     os.chdir(root_dir_assume)
 7 |     sys.path.append(root_dir_assume)
 8 | 
 9 | 
10 | validate_path()  # validate path so you can run from base directory
11 | 


--------------------------------------------------------------------------------
/tests/test_anim_gen.py:
--------------------------------------------------------------------------------
 1 | """
 2 | Test the plugins in this project. Usage: run python tests/test_anim_gen.py directly.
 3 | """
 4 | 
 5 | import init_test
 6 | import os, sys
 7 | 
 8 | 
 9 | if __name__ == "__main__":
10 |     from test_utils import plugin_test
11 | 
12 |     plugin_test(plugin='crazy_functions.数学动画生成manim->动画生成', main_input="A point moving along the function curve y=sin(x), starting from x=0 and stopping at x=4*\pi.")
13 | 


--------------------------------------------------------------------------------
/tests/test_bilibili_down.py:
--------------------------------------------------------------------------------
 1 | """
 2 | Test the plugins in this project. Usage: run python tests/test_bilibili_down.py directly.
 3 | """
 4 | 
 5 | import init_test
 6 | import os, sys
 7 | 
 8 | if __name__ == "__main__":
 9 |     from experimental_mods.get_bilibili_resource import download_bilibili
10 |     download_bilibili("BV1LSSHYXEtv", only_audio=True, user_name="test")
11 | 
12 | # if __name__ == "__main__":
13 | #     from test_utils import plugin_test
14 | 
15 | #     plugin_test(plugin='crazy_functions.VideoResource_GPT->视频任务', main_input="帮我找到《天文馆的猫》,歌手泠鸢")
16 | 


--------------------------------------------------------------------------------
/tests/test_doc2x.py:
--------------------------------------------------------------------------------
1 | import init_test
2 | 
3 | from crazy_functions.pdf_fns.parse_pdf_via_doc2x import 解析PDF_DOC2X_转Latex
4 | 
5 | # 解析PDF_DOC2X_转Latex("gpt_log/arxiv_cache_old/2410.10819/workfolder/merge.pdf")
6 | # 解析PDF_DOC2X_转Latex("gpt_log/arxiv_cache_ooo/2410.07095/workfolder/merge.pdf")
7 | 解析PDF_DOC2X_转Latex("2410.11190v2.pdf")
8 | 


--------------------------------------------------------------------------------
/tests/test_key_pattern_manager.py:
--------------------------------------------------------------------------------
 1 | import unittest
 2 | 
 3 | def validate_path():
 4 |     import os, sys
 5 | 
 6 |     os.path.dirname(__file__)
 7 |     root_dir_assume = os.path.abspath(os.path.dirname(__file__) + "/..")
 8 |     os.chdir(root_dir_assume)
 9 |     sys.path.append(root_dir_assume)
10 | 
11 | 
12 | validate_path()  # validate path so you can run from base directory
13 | 
14 | from shared_utils.key_pattern_manager import is_openai_api_key
15 | 
16 | class TestKeyPatternManager(unittest.TestCase):
17 |     def test_is_openai_api_key_with_valid_key(self):
18 |         key = "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
19 |         self.assertTrue(is_openai_api_key(key))
20 | 
21 |         key = "sx-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
22 |         self.assertFalse(is_openai_api_key(key))
23 | 
24 |         key = "sess-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
25 |         self.assertTrue(is_openai_api_key(key))
26 | 
27 |         key = "sess-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
28 |         self.assertFalse(is_openai_api_key(key))
29 | 
30 |         key = "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx_xxxxxxxxxxxxxxxxxx-xxxxxxxxxxxxxxxxxxxx"
31 |         self.assertTrue(is_openai_api_key(key))
32 |         key = "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx-xxxxxxxxxxxxxxx_xxxxxxxxxxxx_xxxxx-xxxxxxxxxxxxxxxxxxxx"
33 |         self.assertTrue(is_openai_api_key(key))
34 |         key = "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx-xxx-xxxxxxxxxxxxxxx_xxxxxxxxxxxx_xxxxx-xxxxxx-xxxxxxxxxxxxx"
35 |         self.assertTrue(is_openai_api_key(key))
36 |         key = "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx-xxx-xxxxxxxxxxxxxxx_xxxxxxxxxxxx_xxxxx-xxxxxxxxxxxxxxxxxx"
37 |         self.assertFalse(is_openai_api_key(key))
38 |         key = "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx-xxx-xxxxxxxxxxxxxxx_xxxxxxxxxxxx_xxxxx-xxxxxxxxxxxxxxxxxxxxx"
39 |         self.assertFalse(is_openai_api_key(key))
40 | 
41 |         key = "sk-proj-xx-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx_xxxxxxxxxxxxxxxxxxxxx-xxxxxxxx"
42 |         self.assertTrue(is_openai_api_key(key))
43 |         key = "sk-proj-xx-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx_xxxxxxxxxxxx_xxxxxxxxxxxxxxxxxxxxx-xxxxxxxx"
44 |         self.assertTrue(is_openai_api_key(key))
45 |         key = "sk-proj-xx-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx_xxxxxxxxxxxx_xxxxxxxxxxxxxxxxxx-xxxxxxxx"
46 |         self.assertFalse(is_openai_api_key(key))
47 |         key = "sk-proj-xx-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx_xxxxxxxxxxxx_xxxxxxxxxxxxxxxxxx-xxxxxxxxxxxxx"
48 |         self.assertFalse(is_openai_api_key(key))
49 |         key = "sk-proj-xx-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx_xxxxxxxxxxxx_xxxxxxxxxxxxxxxxxx-xxx啊xxxxxxx"
50 |         self.assertFalse(is_openai_api_key(key))
51 | 
52 | 
53 |     def test_is_openai_api_key_with_invalid_key(self):
54 |         key = "invalid_key"
55 |         self.assertFalse(is_openai_api_key(key))
56 | 
57 |     def test_is_openai_api_key_with_custom_pattern(self):
58 |         # Assuming you have set a custom pattern in your configuration
59 |         key = "custom-pattern-key"
60 |         self.assertFalse(is_openai_api_key(key))
61 | 
62 | if __name__ == '__main__':
63 |     unittest.main()


--------------------------------------------------------------------------------
/tests/test_latex_auto_correct.py:
--------------------------------------------------------------------------------
 1 | """
 2 | Test the plugins in this project. Usage: run python tests/test_latex_auto_correct.py directly.
 3 | """
 4 | 
 5 | 
 6 | import os, sys, importlib
 7 | 
 8 | 
 9 | def validate_path():
10 |     dir_name = os.path.dirname(__file__)
11 |     root_dir_assume = os.path.abspath(dir_name + "/..")
12 |     os.chdir(root_dir_assume)
13 |     sys.path.append(root_dir_assume)
14 | 
15 | 
16 | validate_path()  # switch the working directory to the project root
17 | 
18 | if __name__ == "__main__":
19 |     plugin_test = importlib.import_module('test_utils').plugin_test
20 | 
21 | 
22 |     # plugin_test(plugin='crazy_functions.Latex_Function->Latex翻译中文并重新编译PDF', main_input="2203.01927")
23 |     # plugin_test(plugin='crazy_functions.Latex_Function->Latex翻译中文并重新编译PDF', main_input="gpt_log/arxiv_cache/2203.01927/workfolder")
24 |     # plugin_test(plugin='crazy_functions.Latex_Function->Latex翻译中文并重新编译PDF', main_input="2410.05779")
25 |     plugin_test(plugin='crazy_functions.Latex_Function->Latex翻译中文并重新编译PDF', main_input="gpt_log/default_user/workfolder")
26 | 
27 | 


--------------------------------------------------------------------------------
/tests/test_llms.py:
--------------------------------------------------------------------------------
 1 | # """
 2 | # Unit tests for the individual LLM models
 3 | # """
 4 | def validate_path():
 5 |     import os, sys
 6 | 
 7 |     os.path.dirname(__file__)
 8 |     root_dir_assume = os.path.abspath(os.path.dirname(__file__) + "/..")
 9 |     os.chdir(root_dir_assume)
10 |     sys.path.append(root_dir_assume)
11 | 
12 | 
13 | validate_path()  # validate path so you can run from base directory
14 | 
16 | if "在线模型":  # online models (truthy string used as a section toggle)
16 |     if __name__ == "__main__":
17 |         from request_llms.bridge_taichu import predict_no_ui_long_connection
18 |         # from request_llms.bridge_cohere import predict_no_ui_long_connection
19 |         # from request_llms.bridge_spark import predict_no_ui_long_connection
20 |         # from request_llms.bridge_zhipu import predict_no_ui_long_connection
21 |         # from request_llms.bridge_chatglm3 import predict_no_ui_long_connection
22 |         llm_kwargs = {
23 |             "llm_model": "taichu",
24 |             "max_length": 4096,
25 |             "top_p": 1,
26 |             "temperature": 1,
27 |         }
28 | 
29 |         result = predict_no_ui_long_connection(
30 |             inputs="请问什么是质子?", llm_kwargs=llm_kwargs, history=["你好", "我好!"], sys_prompt="系统"
31 |         )
32 |         print("final result:", result)
33 | 
34 | 
35 | 
36 | if "本地模型":  # local models (truthy string used as a section toggle)
37 |     if __name__ == "__main__":
38 |         # from request_llms.bridge_newbingfree import predict_no_ui_long_connection
39 |         # from request_llms.bridge_moss import predict_no_ui_long_connection
40 |         # from request_llms.bridge_jittorllms_pangualpha import predict_no_ui_long_connection
41 |         # from request_llms.bridge_jittorllms_llama import predict_no_ui_long_connection
42 |         # from request_llms.bridge_claude import predict_no_ui_long_connection
43 |         # from request_llms.bridge_internlm import predict_no_ui_long_connection
44 |         # from request_llms.bridge_deepseekcoder import predict_no_ui_long_connection
45 |         # from request_llms.bridge_qwen_7B import predict_no_ui_long_connection
46 |         # from request_llms.bridge_qwen_local import predict_no_ui_long_connection
47 |         llm_kwargs = {
48 |             "max_length": 4096,
49 |             "top_p": 1,
50 |             "temperature": 1,
51 |         }
52 |         result = predict_no_ui_long_connection(
53 |             inputs="请问什么是质子?", llm_kwargs=llm_kwargs, history=["你好", "我好!"], sys_prompt=""
54 |         )
55 |         print("final result:", result)
56 | 
57 | 


--------------------------------------------------------------------------------
/tests/test_media.py:
--------------------------------------------------------------------------------
 1 | """
 2 | Test the plugins in this project. Usage: run python tests/test_media.py directly.
 3 | """
 4 | 
 5 | import init_test
 6 | import os, sys
 7 | 
 8 | 
 9 | if __name__ == "__main__":
10 |     from test_utils import plugin_test
11 | 
12 |     plugin_test(plugin='crazy_functions.VideoResource_GPT->多媒体任务', main_input="我想找一首歌,里面有句歌词是“turn your face towards the sun”")
13 | 
14 |     # plugin_test(plugin='crazy_functions.Internet_GPT->连接网络回答问题', main_input="谁是应急食品?")
15 | 
16 |     # plugin_test(plugin='crazy_functions.函数动态生成->函数动态生成', main_input='交换图像的蓝色通道和红色通道', advanced_arg={"file_path_arg": "./build/ants.jpg"})
17 | 
18 |     # plugin_test(plugin='crazy_functions.Latex_Function->Latex翻译中文并重新编译PDF', main_input="2307.07522")
19 | 
20 |     # plugin_test(plugin='crazy_functions.PDF_Translate->批量翻译PDF文档', main_input='build/pdf/t1.pdf')
21 | 
22 |     # plugin_test(
23 |     #     plugin="crazy_functions.Latex_Function->Latex翻译中文并重新编译PDF",
24 |     #     main_input="G:/SEAFILE_LOCAL/50503047/我的资料库/学位/paperlatex/aaai/Fu_8368_with_appendix",
25 |     # )
26 | 
27 |     # plugin_test(plugin='crazy_functions.虚空终端->虚空终端', main_input='修改api-key为sk-jhoejriotherjep')
28 | 
29 |     # plugin_test(plugin='crazy_functions.批量翻译PDF文档_NOUGAT->批量翻译PDF文档', main_input='crazy_functions/test_project/pdf_and_word/aaai.pdf')
30 | 
31 |     # plugin_test(plugin='crazy_functions.虚空终端->虚空终端', main_input='调用插件,对C:/Users/fuqingxu/Desktop/旧文件/gpt/chatgpt_academic/crazy_functions/latex_fns中的python文件进行解析')
32 | 
33 |     # plugin_test(plugin='crazy_functions.命令行助手->命令行助手', main_input='查看当前的docker容器列表')
34 | 
35 |     # plugin_test(plugin='crazy_functions.SourceCode_Analyse->解析一个Python项目', main_input="crazy_functions/test_project/python/dqn")
36 | 
37 |     # plugin_test(plugin='crazy_functions.SourceCode_Analyse->解析一个C项目', main_input="crazy_functions/test_project/cpp/cppipc")
38 | 
39 |     # plugin_test(plugin='crazy_functions.Latex_Project_Polish->Latex英文润色', main_input="crazy_functions/test_project/latex/attention")
40 | 
41 |     # plugin_test(plugin='crazy_functions.Markdown_Translate->Markdown中译英', main_input="README.md")
42 | 
43 |     # plugin_test(plugin='crazy_functions.PDF_Translate->批量翻译PDF文档', main_input='crazy_functions/test_project/pdf_and_word/aaai.pdf')
44 | 
45 |     # plugin_test(plugin='crazy_functions.谷歌检索小助手->谷歌检索小助手', main_input="https://scholar.google.com/scholar?hl=en&as_sdt=0%2C5&q=auto+reinforcement+learning&btnG=")
46 | 
47 |     # plugin_test(plugin='crazy_functions.总结word文档->总结word文档', main_input="crazy_functions/test_project/pdf_and_word")
48 | 
49 |     # plugin_test(plugin='crazy_functions.下载arxiv论文翻译摘要->下载arxiv论文并翻译摘要', main_input="1812.10695")
50 | 
51 |     # plugin_test(plugin='crazy_functions.解析JupyterNotebook->解析ipynb文件', main_input="crazy_functions/test_samples")
52 | 
53 |     # plugin_test(plugin='crazy_functions.数学动画生成manim->动画生成', main_input="A ball split into 2, and then split into 4, and finally split into 8.")
54 | 
55 |     # for lang in ["English", "French", "Japanese", "Korean", "Russian", "Italian", "German", "Portuguese", "Arabic"]:
56 |     #     plugin_test(plugin='crazy_functions.Markdown_Translate->Markdown翻译指定语言', main_input="README.md", advanced_arg={"advanced_arg": lang})
57 | 
58 |     # plugin_test(plugin='crazy_functions.知识库文件注入->知识库文件注入', main_input="./")
59 | 
60 |     # plugin_test(plugin='crazy_functions.知识库文件注入->读取知识库作答', main_input="What is the installation method?")
61 | 
62 |     # plugin_test(plugin='crazy_functions.知识库文件注入->读取知识库作答', main_input="远程云服务器部署?")
63 | 
64 |     # plugin_test(plugin='crazy_functions.Latex_Function->Latex翻译中文并重新编译PDF', main_input="2210.03629")
65 | 
66 | 


--------------------------------------------------------------------------------
/tests/test_plugins.py:
--------------------------------------------------------------------------------
 1 | """
 2 | Test the plugins in this project. Usage: run python tests/test_plugins.py directly.
 3 | """
 4 | 
 5 | import init_test
 6 | import os, sys
 7 | 
 8 | 
 9 | if __name__ == "__main__":
10 |     from test_utils import plugin_test
11 | 
12 |     plugin_test(plugin='crazy_functions.SourceCode_Comment->注释Python项目', main_input="build/test/python_comment")
13 | 
14 |     # plugin_test(plugin='crazy_functions.Internet_GPT->连接网络回答问题', main_input="谁是应急食品?")
15 | 
16 |     # plugin_test(plugin='crazy_functions.函数动态生成->函数动态生成', main_input='交换图像的蓝色通道和红色通道', advanced_arg={"file_path_arg": "./build/ants.jpg"})
17 | 
18 |     # plugin_test(plugin='crazy_functions.Latex_Function->Latex翻译中文并重新编译PDF', main_input="2307.07522")
19 | 
20 |     # plugin_test(plugin='crazy_functions.PDF_Translate->批量翻译PDF文档', main_input='build/pdf/t1.pdf')
21 | 
22 |     # plugin_test(
23 |     #     plugin="crazy_functions.Latex_Function->Latex翻译中文并重新编译PDF",
24 |     #     main_input="G:/SEAFILE_LOCAL/50503047/我的资料库/学位/paperlatex/aaai/Fu_8368_with_appendix",
25 |     # )
26 | 
27 |     # plugin_test(plugin='crazy_functions.虚空终端->虚空终端', main_input='修改api-key为sk-jhoejriotherjep')
28 | 
29 |     # plugin_test(plugin='crazy_functions.批量翻译PDF文档_NOUGAT->批量翻译PDF文档', main_input='crazy_functions/test_project/pdf_and_word/aaai.pdf')
30 | 
31 |     # plugin_test(plugin='crazy_functions.虚空终端->虚空终端', main_input='调用插件,对C:/Users/fuqingxu/Desktop/旧文件/gpt/chatgpt_academic/crazy_functions/latex_fns中的python文件进行解析')
32 | 
33 |     # plugin_test(plugin='crazy_functions.命令行助手->命令行助手', main_input='查看当前的docker容器列表')
34 | 
35 |     # plugin_test(plugin='crazy_functions.SourceCode_Analyse->解析一个Python项目', main_input="crazy_functions/test_project/python/dqn")
36 | 
37 |     # plugin_test(plugin='crazy_functions.SourceCode_Analyse->解析一个C项目', main_input="crazy_functions/test_project/cpp/cppipc")
38 | 
39 |     # plugin_test(plugin='crazy_functions.Latex_Project_Polish->Latex英文润色', main_input="crazy_functions/test_project/latex/attention")
40 | 
41 |     # plugin_test(plugin='crazy_functions.Markdown_Translate->Markdown中译英', main_input="README.md")
42 | 
43 |     # plugin_test(plugin='crazy_functions.PDF_Translate->批量翻译PDF文档', main_input='crazy_functions/test_project/pdf_and_word/aaai.pdf')
44 | 
45 |     # plugin_test(plugin='crazy_functions.谷歌检索小助手->谷歌检索小助手', main_input="https://scholar.google.com/scholar?hl=en&as_sdt=0%2C5&q=auto+reinforcement+learning&btnG=")
46 | 
47 |     # plugin_test(plugin='crazy_functions.总结word文档->总结word文档', main_input="crazy_functions/test_project/pdf_and_word")
48 | 
49 |     # plugin_test(plugin='crazy_functions.下载arxiv论文翻译摘要->下载arxiv论文并翻译摘要', main_input="1812.10695")
50 | 
51 |     # plugin_test(plugin='crazy_functions.解析JupyterNotebook->解析ipynb文件', main_input="crazy_functions/test_samples")
52 | 
53 |     # plugin_test(plugin='crazy_functions.数学动画生成manim->动画生成', main_input="A ball split into 2, and then split into 4, and finally split into 8.")
54 | 
55 |     # for lang in ["English", "French", "Japanese", "Korean", "Russian", "Italian", "German", "Portuguese", "Arabic"]:
56 |     #     plugin_test(plugin='crazy_functions.Markdown_Translate->Markdown翻译指定语言', main_input="README.md", advanced_arg={"advanced_arg": lang})
57 | 
58 |     # plugin_test(plugin='crazy_functions.知识库文件注入->知识库文件注入', main_input="./")
59 | 
60 |     # plugin_test(plugin='crazy_functions.知识库文件注入->读取知识库作答', main_input="What is the installation method?")
61 | 
62 |     # plugin_test(plugin='crazy_functions.知识库文件注入->读取知识库作答', main_input="远程云服务器部署?")
63 | 
64 |     # plugin_test(plugin='crazy_functions.Latex_Function->Latex翻译中文并重新编译PDF', main_input="2210.03629")
65 | 
66 | 


--------------------------------------------------------------------------------
/tests/test_rag.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/binary-husky/gpt_academic/a7a56b5058fc8e69641e113f615aed8ab3a59a64/tests/test_rag.py


--------------------------------------------------------------------------------
/tests/test_safe_pickle.py:
--------------------------------------------------------------------------------
 1 | def validate_path():
 2 |     import os, sys
 3 |     os.path.dirname(__file__)
 4 |     root_dir_assume = os.path.abspath(os.path.dirname(__file__) + "/..")
 5 |     os.chdir(root_dir_assume)
 6 |     sys.path.append(root_dir_assume)
 7 | validate_path()  # validate path so you can run from base directory
 8 | 
 9 | from crazy_functions.latex_fns.latex_pickle_io import objdump, objload
10 | from crazy_functions.latex_fns.latex_actions import LatexPaperFileGroup, LatexPaperSplit
11 | pfg = LatexPaperFileGroup()
12 | pfg.get_token_num = None
13 | pfg.target = "target_elem"
14 | x = objdump(pfg)
15 | t = objload()
16 | 
17 | print(t.target)


--------------------------------------------------------------------------------
/tests/test_searxng.py:
--------------------------------------------------------------------------------
 1 | def validate_path():
 2 |     import os, sys
 3 |     os.path.dirname(__file__)
 4 |     root_dir_assume = os.path.abspath(os.path.dirname(__file__) + "/..")
 5 |     os.chdir(root_dir_assume)
 6 |     sys.path.append(root_dir_assume)
 7 | validate_path()  # validate path so you can run from base directory
 8 | 
 9 | from toolbox import get_conf
10 | import requests
11 | 
12 | def searxng_request(query, proxies, categories='general', searxng_url=None, engines=None):
13 |     url = searxng_url or 'http://localhost:50001/'
14 | 
15 |     if engines is None:
16 |         engines = 'bing,'
17 |     if categories == 'general':
18 |         params = {
19 |             'q': query,         # search query
20 |             'format': 'json',   # output format: JSON
21 |             'language': 'zh',   # search language
22 |             'engines': engines,
23 |         }
24 |     elif categories == 'science':
25 |         params = {
26 |             'q': query,         # search query
27 |             'format': 'json',   # output format: JSON
28 |             'language': 'zh',   # search language
29 |             'categories': 'science'
30 |         }
31 |     else:
32 |         raise ValueError('不支持的检索类型')
33 |     headers = {
34 |         'Accept-Language': 'zh-CN,zh;q=0.9',
35 |         'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
36 |         'X-Forwarded-For': '112.112.112.112',
37 |         'X-Real-IP': '112.112.112.112'
38 |     }
39 |     results = []
40 |     response = requests.post(url, params=params, headers=headers, proxies=proxies, timeout=30)
41 |     if response.status_code == 200:
42 |         json_result = response.json()
43 |         for result in json_result['results']:
44 |             item = {
45 |                 "title": result.get("title", ""),
46 |                 "content": result.get("content", ""),
47 |                 "link": result["url"],
48 |             }
49 |             print(result['engines'])
50 |             results.append(item)
51 |         return results
52 |     else:
53 |         if response.status_code == 429:
54 |             raise ValueError("Searxng(在线搜索服务)当前使用人数太多,请稍后。")
55 |         else:
56 |             raise ValueError("在线搜索失败,状态码: " + str(response.status_code) + '\t' + response.content.decode('utf-8'))
57 | res = searxng_request("vr environment", None, categories='science', searxng_url=None, engines=None)
58 | print(res)


--------------------------------------------------------------------------------
/tests/test_social_helper.py:
--------------------------------------------------------------------------------
 1 | """
 2 | Test the plugins in this project. Usage: run python tests/test_social_helper.py directly.
 3 | """
 4 | 
 5 | import init_test
 6 | import os, sys
 7 | 
 8 | 
 9 | if __name__ == "__main__":
10 |     from test_utils import plugin_test
11 |     plugin_test(
12 |         plugin='crazy_functions.Social_Helper->I人助手', 
13 |         main_input="""
14 | 添加联系人:
15 | 艾德·史塔克:我的养父,他是临冬城的公爵。
16 | 凯特琳·史塔克:我的养母,她对我态度冷淡,因为我是私生子。
17 | 罗柏·史塔克:我的哥哥,他是北境的继承人。
18 | 艾莉亚·史塔克:我的妹妹,她和我关系亲密,性格独立坚强。
19 | 珊莎·史塔克:我的妹妹,她梦想成为一位淑女。
20 | 布兰·史塔克:我的弟弟,他有预知未来的能力。
21 | 瑞肯·史塔克:我的弟弟,他是个天真无邪的小孩。
22 | 山姆威尔·塔利:我的朋友,他在守夜人军团中与我并肩作战。
23 | 伊格瑞特:我的恋人,她是野人中的一员。
24 |         """)
25 | 


--------------------------------------------------------------------------------
/tests/test_tts.py:
--------------------------------------------------------------------------------
 1 | import edge_tts
 2 | import os
 3 | import httpx
 4 | from toolbox import get_conf
 5 | 
 6 | 
 7 | async def test_tts():    
 8 |     async with httpx.AsyncClient() as client:
 9 |         try:
10 |             # Forward the request to the target service
11 |             import tempfile
12 |             import edge_tts
13 |             import wave
14 |             import uuid
15 |             from pydub import AudioSegment
16 |             voice = get_conf("EDGE_TTS_VOICE")
17 |             tts = edge_tts.Communicate(text="测试", voice=voice)
18 |             temp_folder = tempfile.gettempdir()
19 |             temp_file_name = str(uuid.uuid4().hex)
20 |             temp_file = os.path.join(temp_folder, f'{temp_file_name}.mp3')
21 |             await tts.save(temp_file)
22 |             try:
23 |                 mp3_audio = AudioSegment.from_file(temp_file, format="mp3")
24 |                 mp3_audio.export(temp_file, format="wav")
25 |                 with open(temp_file, 'rb') as wav_file: t = wav_file.read()
26 |             except Exception:
27 |                 raise RuntimeError("ffmpeg未安装,无法处理EdgeTTS音频。安装方法见`https://github.com/jiaaro/pydub#getting-ffmpeg-set-up`")
28 |         except httpx.RequestError as e:
29 |             raise RuntimeError(f"请求失败: {e}")
30 |         
31 | if __name__ == "__main__":
32 |     import asyncio
33 |     asyncio.run(test_tts())


--------------------------------------------------------------------------------
/tests/test_utils.py:
--------------------------------------------------------------------------------
  1 | from toolbox import get_conf
  2 | from toolbox import set_conf
  3 | from toolbox import set_multi_conf
  4 | from toolbox import get_plugin_handle
  5 | from toolbox import get_plugin_default_kwargs
  6 | from toolbox import get_chat_handle
  7 | from toolbox import get_chat_default_kwargs
  8 | from functools import wraps
  9 | import sys
 10 | import os
 11 | 
 12 | 
 13 | def chat_to_markdown_str(chat):
 14 |     result = ""
 15 |     for i, cc in enumerate(chat):
 16 |         result += f"\n\n{cc[0]}\n\n{cc[1]}"
 17 |         if i != len(chat) - 1:
 18 |             result += "\n\n---"
 19 |     return result
 20 | 
 21 | 
 22 | def silence_stdout(func):
 23 |     @wraps(func)
 24 |     def wrapper(*args, **kwargs):
 25 |         _original_stdout = sys.stdout
 26 |         sys.stdout = open(os.devnull, "w")
 27 |         sys.stdout.reconfigure(encoding="utf-8")
 28 |         for q in func(*args, **kwargs):
 29 |             sys.stdout = _original_stdout
 30 |             yield q
 31 |             sys.stdout = open(os.devnull, "w")
 32 |             sys.stdout.reconfigure(encoding="utf-8")
 33 |         sys.stdout.close()
 34 |         sys.stdout = _original_stdout
 35 | 
 36 |     return wrapper
 37 | 
 38 | 
 39 | def silence_stdout_fn(func):
 40 |     @wraps(func)
 41 |     def wrapper(*args, **kwargs):
 42 |         _original_stdout = sys.stdout
 43 |         sys.stdout = open(os.devnull, "w")
 44 |         sys.stdout.reconfigure(encoding="utf-8")
 45 |         result = func(*args, **kwargs)
 46 |         sys.stdout.close()
 47 |         sys.stdout = _original_stdout
 48 |         return result
 49 | 
 50 |     return wrapper
 51 | 
 52 | 
 53 | class VoidTerminal:
 54 |     def __init__(self) -> None:
 55 |         pass
 56 | 
 57 | 
 58 | vt = VoidTerminal()
 59 | vt.get_conf = silence_stdout_fn(get_conf)
 60 | vt.set_conf = silence_stdout_fn(set_conf)
 61 | vt.set_multi_conf = silence_stdout_fn(set_multi_conf)
 62 | vt.get_plugin_handle = silence_stdout_fn(get_plugin_handle)
 63 | vt.get_plugin_default_kwargs = silence_stdout_fn(get_plugin_default_kwargs)
 64 | vt.get_chat_handle = silence_stdout_fn(get_chat_handle)
 65 | vt.get_chat_default_kwargs = silence_stdout_fn(get_chat_default_kwargs)
 66 | vt.chat_to_markdown_str = chat_to_markdown_str
 67 | (
 68 |     proxies,
 69 |     WEB_PORT,
 70 |     LLM_MODEL,
 71 |     CONCURRENT_COUNT,
 72 |     AUTHENTICATION,
 73 |     CHATBOT_HEIGHT,
 74 |     LAYOUT,
 75 |     API_KEY,
 76 | ) = vt.get_conf(
 77 |     "proxies",
 78 |     "WEB_PORT",
 79 |     "LLM_MODEL",
 80 |     "CONCURRENT_COUNT",
 81 |     "AUTHENTICATION",
 82 |     "CHATBOT_HEIGHT",
 83 |     "LAYOUT",
 84 |     "API_KEY",
 85 | )
 86 | 
 87 | 
 88 | def plugin_test(main_input, plugin, advanced_arg=None, debug=True):
 89 |     from rich.live import Live
 90 |     from rich.markdown import Markdown
 91 | 
 92 |     vt.set_conf(key="API_KEY", value=API_KEY)
 93 |     vt.set_conf(key="LLM_MODEL", value=LLM_MODEL)
 94 | 
 95 |     plugin = vt.get_plugin_handle(plugin)
 96 |     plugin_kwargs = vt.get_plugin_default_kwargs()
 97 |     plugin_kwargs["main_input"] = main_input
 98 |     if advanced_arg is not None:
 99 |         plugin_kwargs["plugin_kwargs"] = advanced_arg
100 |     if debug:
101 |         my_working_plugin = (plugin)(**plugin_kwargs)
102 |     else:
103 |         my_working_plugin = silence_stdout(plugin)(**plugin_kwargs)
104 | 
105 |     with Live(Markdown(""), auto_refresh=False, vertical_overflow="visible") as live:
106 |         for cookies, chat, hist, msg in my_working_plugin:
107 |             md_str = vt.chat_to_markdown_str(chat)
108 |             md = Markdown(md_str)
109 |             live.update(md, refresh=True)
110 | 


--------------------------------------------------------------------------------
/tests/test_vector_plugins.py:
--------------------------------------------------------------------------------
 1 | """
 2 | Test the plugins in this project. Usage: run python tests/test_vector_plugins.py directly.
 3 | """
 4 | 
 5 | 
 6 | import os, sys
 7 | 
 8 | 
 9 | def validate_path():
10 |     dir_name = os.path.dirname(__file__)
11 |     root_dir_assume = os.path.abspath(dir_name + "/..")
12 |     os.chdir(root_dir_assume)
13 |     sys.path.append(root_dir_assume)
14 | 
15 | 
16 | validate_path()  # switch the working directory to the project root
17 | 
18 | if __name__ == "__main__":
19 |     from tests.test_utils import plugin_test
20 | 
21 |     plugin_test(plugin="crazy_functions.知识库问答->知识库文件注入", main_input="./README.md")
22 | 
23 |     plugin_test(
24 |         plugin="crazy_functions.知识库问答->读取知识库作答",
25 |         main_input="What is the installation method?",
26 |     )
27 | 
28 |     plugin_test(plugin="crazy_functions.知识库问答->读取知识库作答", main_input="远程云服务器部署?")
29 | 


--------------------------------------------------------------------------------
/themes/base64.mjs:
--------------------------------------------------------------------------------
1 | // we have moved mermaid-related code to gradio-fix repository: binary-husky/gradio-fix@32150d0
2 | 


--------------------------------------------------------------------------------
/themes/common.py:
--------------------------------------------------------------------------------
 1 | from functools import lru_cache
 2 | from toolbox import get_conf
 3 | CODE_HIGHLIGHT, ADD_WAIFU, LAYOUT = get_conf("CODE_HIGHLIGHT", "ADD_WAIFU", "LAYOUT")
 4 | 
 5 | def inject_mutex_button_code(js_content):
 6 |     from crazy_functional import get_multiplex_button_functions
 7 |     fns = get_multiplex_button_functions()
 8 | 
 9 |     template = """
10 |     if (multiplex_sel === "{x}") {
11 |         let _align_name_in_crazy_function_py = "{y}";
12 |         call_plugin_via_name(_align_name_in_crazy_function_py);
13 |         return;
14 |     }
15 |     """
16 | 
17 |     replacement = ""
18 |     for fn in fns.keys():
19 |         if fn == "常规对话": continue
20 |         replacement += template.replace("{x}", fn).replace("{y}", fns[fn])
21 |     js_content = js_content.replace("// REPLACE_EXTENDED_MULTIPLEX_FUNCTIONS_HERE", replacement)
22 |     return js_content
23 | 
24 | def minimize_js(common_js_path):
25 |     try:
26 |         import rjsmin, hashlib, glob, os
27 |         # clean up old minimized js files, matching `common_js_path + '.min.*'`
28 |         for old_min_js in glob.glob(common_js_path + '.min.*.js'):
29 |             os.remove(old_min_js)
30 |         # use rjsmin to minimize `common_js_path`
31 |         c_jsmin = rjsmin.jsmin
32 |         with open(common_js_path, "r", encoding='utf-8') as f:
33 |             js_content = f.read()
34 |         if common_js_path == "themes/common.js":
35 |             js_content = inject_mutex_button_code(js_content)
36 |         minimized_js_content = c_jsmin(js_content)
37 |         # compute sha256 hash of minimized js content
38 |         sha_hash = hashlib.sha256(minimized_js_content.encode()).hexdigest()[:8]
39 |         minimized_js_path = common_js_path + '.min.' + sha_hash + '.js'
40 |         # save to minimized js file
41 |         with open(minimized_js_path, "w", encoding='utf-8') as f:
42 |             f.write(minimized_js_content)
43 |         # return minimized js file path
44 |         return minimized_js_path
45 |     except Exception:
46 |         return common_js_path
47 | 
48 | @lru_cache
49 | def get_common_html_javascript_code():
50 |     js = "\n"
51 |     common_js_path_list = [
52 |         "themes/common.js",
53 |         "themes/theme.js",
54 |         "themes/tts.js",
55 |         "themes/init.js",
56 |         "themes/welcome.js",
57 |     ]
58 | 
59 |     if ADD_WAIFU: # add the Live2D waifu
60 |         common_js_path_list += [
61 |             "themes/waifu_plugin/jquery.min.js",
62 |             "themes/waifu_plugin/jquery-ui.min.js",
63 |         ]
64 | 
65 |     for common_js_path in common_js_path_list:
66 |         if '.min.' not in common_js_path:
67 |             minimized_js_path = minimize_js(common_js_path)
68 |         else:
69 |             minimized_js_path = common_js_path
70 |         jsf = f"file={minimized_js_path}"
71 |         js += f"""<script src="{jsf}"></script>\n"""
72 | 
73 |     if not ADD_WAIFU:
74 |         js += """<script>window.loadLive2D = function(){};</script>\n"""
75 | 
76 |     return js
77 | 


--------------------------------------------------------------------------------
/themes/cookies.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/binary-husky/gpt_academic/a7a56b5058fc8e69641e113f615aed8ab3a59a64/themes/cookies.py


--------------------------------------------------------------------------------
/themes/gradios.py:
--------------------------------------------------------------------------------
 1 | import os
 2 | import gradio as gr
 3 | from toolbox import get_conf, ProxyNetworkActivate
 4 | from loguru import logger
 5 | 
 6 | CODE_HIGHLIGHT, ADD_WAIFU, LAYOUT = get_conf("CODE_HIGHLIGHT", "ADD_WAIFU", "LAYOUT")
 7 | theme_dir = os.path.dirname(__file__)
 8 | 
 9 | 
10 | def dynamic_set_theme(THEME):
11 |     set_theme = gr.themes.ThemeClass()
12 |     with ProxyNetworkActivate("Download_Gradio_Theme"):
13 |         logger.info("正在下载Gradio主题,请稍等。")
14 |         try:
15 |             if THEME.startswith("Huggingface-"):
16 |                 THEME = THEME[len("Huggingface-"):]
17 |             if THEME.startswith("huggingface-"):
18 |                 THEME = THEME[len("huggingface-"):]
19 |             set_theme = set_theme.from_hub(THEME.lower())
20 |         except Exception:
21 |             logger.error("下载Gradio主题时出现异常。")
22 |     return set_theme
23 | 
24 | 
25 | def adjust_theme():
26 |     try:
27 |         set_theme = gr.themes.ThemeClass()
28 |         with ProxyNetworkActivate("Download_Gradio_Theme"):
29 |             logger.info("正在下载Gradio主题,请稍等。")
30 |             try:
31 |                 THEME = get_conf("THEME")
32 |                 if THEME.startswith("Huggingface-"):
33 |                     THEME = THEME[len("Huggingface-"):]
34 |                 if THEME.startswith("huggingface-"):
35 |                     THEME = THEME[len("huggingface-"):]
36 |                 set_theme = set_theme.from_hub(THEME.lower())
37 |             except Exception:
38 |                 logger.error("下载Gradio主题时出现异常。")
39 | 
40 |         from themes.common import get_common_html_javascript_code
41 |         js = get_common_html_javascript_code()
42 |         
43 |         if not hasattr(gr, "RawTemplateResponse"):
44 |             gr.RawTemplateResponse = gr.routes.templates.TemplateResponse
45 |         gradio_original_template_fn = gr.RawTemplateResponse
46 | 
47 |         def gradio_new_template_fn(*args, **kwargs):
48 |             res = gradio_original_template_fn(*args, **kwargs)
49 |             res.body = res.body.replace(b"</html>", f"{js}</html>".encode("utf8"))
50 |             res.init_headers()
51 |             return res
52 | 
53 |         gr.routes.templates.TemplateResponse = (
54 |             gradio_new_template_fn  # override gradio template
55 |         )
56 |     except Exception:
57 |         set_theme = None
58 |         logger.error("gradio版本较旧, 不能自定义字体和颜色。")
59 |     return set_theme
60 | 
61 | 
62 | with open(os.path.join(theme_dir, "common.css"), "r", encoding="utf-8") as f:
63 |     advanced_css = f.read()
64 | 


--------------------------------------------------------------------------------
/themes/green.js:
--------------------------------------------------------------------------------
 1 | 
 2 | var academic_chat = null;
 3 | 
 4 | var sliders = null;
 5 | var rangeInputs = null;
 6 | var numberInputs = null;
 7 | 
 8 | function set_elements() {
 9 |     academic_chat = document.querySelector('gradio-app');
10 |     async function get_sliders() {
11 |         sliders = document.querySelectorAll('input[type="range"]');
12 |         while (sliders.length == 0) {
13 |             await new Promise(r => setTimeout(r, 100));
14 |             sliders = document.querySelectorAll('input[type="range"]');
15 |         }
16 |         setSlider();
17 |     }
18 |     get_sliders();
19 | }
20 | 
21 | function setSlider() {
22 |     rangeInputs = document.querySelectorAll('input[type="range"]');
23 |     numberInputs = document.querySelectorAll('input[type="number"]')
24 |     function setSliderRange() {
25 |         var range = document.querySelectorAll('input[type="range"]');
26 |         range.forEach(range => {
27 |             range.style.backgroundSize = (range.value - range.min) / (range.max - range.min) * 100 + '% 100%';
28 |         });
29 |     }
30 |     setSliderRange();
31 |     rangeInputs.forEach(rangeInput => {
32 |         rangeInput.addEventListener('input', setSliderRange);
33 |     });
34 |     numberInputs.forEach(numberInput => {
35 |         numberInput.addEventListener('input', setSliderRange);
36 |     })
37 | }
38 | 
39 | window.addEventListener("DOMContentLoaded", () => {
40 |     set_elements();
41 | });
42 | 


--------------------------------------------------------------------------------
/themes/gui_advanced_plugin_class.py:
--------------------------------------------------------------------------------
 1 | import gradio as gr
 2 | import json
 3 | from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, DummyWith
 4 | 
 5 | def define_gui_advanced_plugin_class(plugins):
 6 |     # Define the advanced-argument panel for new-generation plugins
 7 |     with gr.Floating(init_x="50%", init_y="50%", visible=False, width="30%", drag="top", elem_id="plugin_arg_menu"):
 8 |         with gr.Accordion("选择插件参数", open=True, elem_id="plugin_arg_panel"):
 9 |             for u in range(8):
10 |                 with gr.Row():
11 |                     gr.Textbox(show_label=True, label="T1", placeholder="请输入", lines=1, visible=False, elem_id=f"plugin_arg_txt_{u}").style(container=False)
12 |             for u in range(8):
13 |                 with gr.Row(): # PLUGIN_ARG_MENU
14 |                     gr.Dropdown(label="T1", value="请选择", choices=[], visible=True, elem_id=f"plugin_arg_drop_{u}", interactive=True)
15 | 
16 |             with gr.Row():
17 |                 # this hidden textbox holds the attributes of the plugin currently popped up
18 |                 gr.Textbox(show_label=False, placeholder="请输入", lines=1, visible=False,
19 |                         elem_id=f"invisible_current_pop_up_plugin_arg").style(container=False)
20 |                 usr_confirmed_arg = gr.Textbox(show_label=False, placeholder="请输入", lines=1, visible=False,
21 |                         elem_id=f"invisible_current_pop_up_plugin_arg_final").style(container=False)
22 | 
23 |                 arg_confirm_btn = gr.Button("确认参数并执行", variant="stop")
24 |                 arg_confirm_btn.style(size="sm")
25 | 
26 |                 arg_cancel_btn = gr.Button("取消", variant="stop")
27 |                 arg_cancel_btn.click(None, None, None, _js="""()=>close_current_pop_up_plugin()""")
28 |                 arg_cancel_btn.style(size="sm")
29 | 
30 |                 arg_confirm_btn.click(None, None, None, _js="""()=>execute_current_pop_up_plugin()""")
31 |                 invisible_callback_btn_for_plugin_exe = gr.Button(r"未选定任何插件", variant="secondary", visible=False, elem_id="invisible_callback_btn_for_plugin_exe").style(size="sm")
32 |                 # Register the callback for the shape-shifting (switchy) button
33 |                 def route_switchy_bt_with_arg(request: gr.Request, input_order, *arg):
34 |                     arguments = {k:v for k,v in zip(input_order, arg)}      # Re-assemble the inputs into a kwargs dict
35 |                     which_plugin = arguments.pop('new_plugin_callback')     # Name of the plugin to execute
36 |                     if which_plugin in [r"未选定任何插件"]: return
37 |                     usr_confirmed_arg = arguments.pop('usr_confirmed_arg')  # User-confirmed plugin arguments (JSON string)
38 |                     arg_confirm: dict = {}
39 |                     usr_confirmed_arg_dict = json.loads(usr_confirmed_arg)  # Parse the plugin arguments
40 |                     for arg_name in usr_confirmed_arg_dict:
41 |                         arg_confirm.update({arg_name: str(usr_confirmed_arg_dict[arg_name]['user_confirmed_value'])})
42 | 
43 |                     if plugins[which_plugin].get("Class", None) is not None:  # Resolve the plugin's entry point
44 |                         plugin_obj = plugins[which_plugin]["Class"]
45 |                         plugin_exe = plugin_obj.execute
46 |                     else:
47 |                         plugin_exe = plugins[which_plugin]["Function"]
48 | 
49 |                     arguments['plugin_advanced_arg'] = arg_confirm          # Pass the advanced-argument values through
50 |                     if arg_confirm.get('main_input', None) is not None:     # Let 'main_input' override the main input box
51 |                         arguments['txt'] = arg_confirm['main_input']
52 | 
53 |                     # Everything is ready; start execution
54 |                     yield from ArgsGeneralWrapper(plugin_exe)(request, *arguments.values())
55 | 
56 |     return invisible_callback_btn_for_plugin_exe, route_switchy_bt_with_arg, usr_confirmed_arg
57 | 
58 | 
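For reference, route_switchy_bt_with_arg expects the hidden textbox to hold a JSON object mapping each argument name to an object carrying a user_confirmed_value field. A hedged sketch of the payload the front-end might write (the argument names and values here are illustrative assumptions):

// Hypothetical contents of the textbox with elem_id
// "invisible_current_pop_up_plugin_arg_final"; only the
// 'user_confirmed_value' key is consumed by the Python side.
const usrConfirmedArg = JSON.stringify({
    main_input:   { user_confirmed_value: "build/latex_project" },
    advanced_arg: { user_confirmed_value: "true" },
});
// route_switchy_bt_with_arg flattens this into
// {'main_input': 'build/latex_project', 'advanced_arg': 'true'}
// and, because 'main_input' is present, also overrides arguments['txt'].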


--------------------------------------------------------------------------------
/themes/svg/arxiv.svg:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <svg width="1024" height="1024" xmlns="http://www.w3.org/2000/svg" xmlns:svg="http://www.w3.org/2000/svg" class="icon" version="1.1">
3 |  <g class="layer">
4 |   <title>Layer 1</title>
5 |   <path d="m140,188l584,0l0,164l76,0l0,-208c0,-17.7 -14.3,-32 -32,-32l-672,0c-17.7,0 -32,14.3 -32,32l0,736c0,17.7 14.3,32 32,32l544,0l0,-76l-500,0l0,-648zm274.3,68l-60.6,0c-3.4,0 -6.4,2.2 -7.6,5.4l-127.1,368c-0.3,0.8 -0.4,1.7 -0.4,2.6c0,4.4 3.6,8 8,8l55.1,0c3.4,0 6.4,-2.2 7.6,-5.4l32.7,-94.6l196.2,0l-96.2,-278.6c-1.3,-3.2 -4.3,-5.4 -7.7,-5.4zm12.4,228l-85.5,0l42.8,-123.8l42.7,123.8zm509.3,44l-136,0l0,-93c0,-4.4 -3.6,-8 -8,-8l-56,0c-4.4,0 -8,3.6 -8,8l0,93l-136,0c-13.3,0 -24,10.7 -24,24l0,176c0,13.3 10.7,24 24,24l136,0l0,152c0,4.4 3.6,8 8,8l56,0c4.4,0 8,-3.6 8,-8l0,-152l136,0c13.3,0 24,-10.7 24,-24l0,-176c0,-13.3 -10.7,-24 -24,-24zm-208,152l-88,0l0,-80l88,0l0,80zm160,0l-88,0l0,-80l88,0l0,80z" fill="#00aeff" id="svg_1"/>
6 |  </g>
7 | </svg>


--------------------------------------------------------------------------------
/themes/svg/box.svg:
--------------------------------------------------------------------------------
1 | <svg t="1722787038697" class="icon" viewBox="0 0 1024 1024" version="1.1" xmlns="http://www.w3.org/2000/svg" p-id="7850" width="200" height="200"><path d="M902.095238 390.095238l-0.024381 340.041143-0.073143-4.583619A170.666667 170.666667 0 1 0 731.257905 902.095238H195.047619a73.142857 73.142857 0 0 1-73.142857-73.142857V390.095238h780.190476z m-175.542857 219.428572a117.028571 117.028571 0 0 1 101.13219 175.957333l70.217143 70.217143-41.374476 41.374476-70.022095-69.997714A117.028571 117.028571 0 1 1 726.552381 609.52381z m0 73.142857a43.885714 43.885714 0 1 0 0 87.771428 43.885714 43.885714 0 0 0 0-87.771428zM463.238095 438.857143h-195.047619v73.142857h195.047619v-73.142857zM765.123048 121.904762a73.142857 73.142857 0 0 1 65.414095 40.423619l63.829333 127.634286A73.142857 73.142857 0 0 1 901.87581 316.952381H122.831238a73.142857 73.142857 0 0 1 6.339048-20.138667l64.560762-133.607619A73.142857 73.142857 0 0 1 259.584 121.904762h505.539048z" p-id="7851" fill="#1296db"></path></svg>


--------------------------------------------------------------------------------
/themes/svg/brain.svg:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <svg width="1024" height="1024" xmlns="http://www.w3.org/2000/svg" xmlns:svg="http://www.w3.org/2000/svg" class="icon" version="1.1">
3 |  <g class="layer">
4 |   <title>Layer 1</title>
5 |   <path d="m832,96a96,96 0 1 1 -90.72,127.52l-1.54,0.26l-3.74,0.22l-256,0l0,256l256,0c1.82,0 3.58,0.16 5.31,0.45a96,96 0 1 1 0,63.07l-1.6,0.26l-3.71,0.22l-256,0l0,256l256,0c1.82,0 3.58,0.16 5.31,0.45a96,96 0 1 1 0,63.07l-1.6,0.26l-3.71,0.22l-288,0a32,32 0 0 1 -31.78,-28.26l-0.22,-3.74l0,-288l-68.03,0a128.06,128.06 0 0 1 -117.57,95.87l-6.4,0.13a128,128 0 1 1 123.97,-160l68.03,0l0,-288a32,32 0 0 1 28.26,-31.78l3.74,-0.22l288,0c1.82,0 3.58,0.16 5.31,0.45a96,96 0 0 1 90.69,-64.45zm0,704a32,32 0 1 0 0,64a32,32 0 0 0 0,-64zm-608,-352a64,64 0 1 0 0,128a64,64 0 0 0 0,-128zm608,32a32,32 0 1 0 0,64a32,32 0 0 0 0,-64zm0,-320a32,32 0 1 0 0,64a32,32 0 0 0 0,-64z" fill="#00aeff" id="svg_1"/>
6 |   <path d="m224,384a128,128 0 1 1 0,256a128,128 0 0 1 0,-256zm0,64a64,64 0 1 0 0,128a64,64 0 0 0 0,-128z" fill="#00aeff" id="svg_2"/>
7 |  </g>
8 | </svg>


--------------------------------------------------------------------------------
/themes/svg/check.svg:
--------------------------------------------------------------------------------
1 | <svg t="1722786924074" class="icon" viewBox="0 0 1024 1024" version="1.1" xmlns="http://www.w3.org/2000/svg" p-id="7573" width="200" height="200"><path d="M690.176 146.285714c19.870476 0 38.838857 8.338286 52.345905 22.991238l45.519238 49.420191-280.81981 207.993905-162.694095-101.546667a54.51581 54.51581 0 0 0-75.337143 17.798095c-12.190476 19.748571-10.727619 45.056 3.584 63.268572l182.930286 232.545523a71.046095 71.046095 0 0 0 108.495238 4.022858L870.15619 307.833905l49.493334 53.735619a72.240762 72.240762 0 0 1 0.585143 97.084952L564.931048 854.064762a71.046095 71.046095 0 0 1-105.862096 0L103.789714 458.654476a72.240762 72.240762 0 0 1 0.585143-97.084952l177.078857-192.292572A71.143619 71.143619 0 0 1 333.799619 146.285714h356.376381z m176.420571 73.703619a7.143619 7.143619 0 0 1 9.508572 10.532572L529.261714 610.279619a23.79581 23.79581 0 0 1-36.254476-1.340952l-183.027809-232.643048a7.143619 7.143619 0 0 1 9.386666-10.483809l190.171429 118.686476z" p-id="7574" fill="#1296db"></path></svg>


--------------------------------------------------------------------------------
/themes/svg/conf.svg:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <svg width="1024" height="1024" xmlns="http://www.w3.org/2000/svg" xmlns:svg="http://www.w3.org/2000/svg" class="icon" version="1.1">
3 |  <g class="layer">
4 |   <title>Layer 1</title>
5 |   <path d="m373.83,194.49a44.92,44.92 0 1 1 0,-89.84a44.96,44.96 0 1 1 0,89.84m165.43,-163.51l-63.23,0s-189.11,-1.28 -204.17,99.33l0,122.19l241.47,0l0,34.95l-358.3,-0.66s-136.03,9.94 -138.51,206.74l0,57.75s-3.96,190.03 130.12,216.55l92.32,0l0,-128.09a132,132 0 0 1 132.06,-132.03l256.43,0a123.11,123.11 0 0 0 123.18,-123.12l0,-238.62c0,-2.25 -1.19,-103.13 -211.37,-115.02" fill="#00aeff" id="svg_1"/>
6 |   <path d="m647.01,853.16c24.84,0 44.96,20.01 44.96,44.85a44.96,44.96 0 1 1 -44.96,-44.85m-165.43,163.51l63.23,0s189.14,1.22 204.17,-99.36l0,-122.22l-241.47,0l0,-34.95l358.27,0.66s136.06,-9.88 138.54,-206.65l0,-57.74s3.96,-190.04 -130.12,-216.56l-92.32,0l0,128.03a132.06,132.06 0 0 1 -132.06,132.1l-256.47,0a123.11,123.11 0 0 0 -123.14,123.08l0,238.59c0,2.24 1.19,103.16 211.37,115.02" fill="#00aeff" id="svg_2"/>
7 |  </g>
8 | </svg>


--------------------------------------------------------------------------------
/themes/svg/default.svg:
--------------------------------------------------------------------------------
1 | <svg t="1721122982934" class="icon" viewBox="0 0 1024 1024" version="1.1" xmlns="http://www.w3.org/2000/svg" p-id="1823" width="200" height="200"><path d="M512 512m-512 0a512 512 0 1 0 1024 0 512 512 0 1 0-1024 0Z" fill="#04BEE8" p-id="1824"></path><path d="M324.408781 655.018925C505.290126 655.018925 651.918244 508.387706 651.918244 327.509463c0-152.138029-103.733293-280.047334-244.329811-316.853972C205.813923 52.463528 47.497011 213.017581 8.987325 415.981977 47.587706 553.880127 174.183098 655.018925 324.408781 655.018925z" fill="#FFFFFF" fill-opacity=".2" p-id="1825"></path><path d="M512 1024c282.766631 0 512-229.233369 512-512 0-31.765705-2.891385-62.853911-8.433853-93.018889C928.057169 336.0999 809.874701 285.26268 679.824375 285.26268c-269.711213 0-488.357305 218.645317-488.357305 488.357305 0 54.959576 9.084221 107.802937 25.822474 157.10377C300.626556 989.489417 402.283167 1024 512 1024z" fill="#FFFFFF" fill-opacity=".15" p-id="1826"></path><path d="M732.535958 756.566238c36.389596 0 65.889478-29.499882 65.889477-65.889478 0 36.389596 29.502983 65.889478 65.889478 65.889478-17.053747 0-65.889478 29.502983-65.889478 65.889477 0-36.386495-29.499882-65.889478-65.889477-65.889477zM159.685087 247.279334c25.686819 0 46.51022-20.8234 46.51022-46.51022 0 25.686819 20.8234 46.51022 46.510219 46.51022-12.03607 0-46.51022 20.8234-46.510219 46.510219 0-25.686819-20.8234-46.51022-46.51022-46.510219z" fill="#FFFFFF" fill-opacity=".5" p-id="1827"></path><path d="M206.195307 333.32324c8.562531 0 15.503407-6.940875 15.503406-15.503407 0 8.562531 6.940875 15.503407 15.503407 15.503407-4.012282 0-15.503407 6.940875-15.503407 15.503406 0-8.562531-6.940875-15.503407-15.503406-15.503406z" fill="#FFFFFF" fill-opacity=".3" p-id="1828"></path><path d="M282.161998 248.054504m80.617714 0l299.215746 0q80.617714 0 80.617714 80.617714l0 366.380379q0 80.617714-80.617714 80.617713l-299.215746 0q-80.617714 0-80.617714-80.617713l0-366.380379q0-80.617714 80.617714-80.617714Z" fill="#FFFFFF" p-id="1829"></path><path d="M530.216503 280.611658h113.433774v146.467658c0 10.89967-13.049992 16.498725-20.948978 8.9881l-35.767909-34.009048-35.767909 34.009048C543.266495 443.578041 530.216503 437.978986 530.216503 427.079316V280.611658z" fill="#29C8EB" p-id="1830"></path><path d="M365.105223 280.611658m14.728237 0l0 0q14.728236 0 14.728236 14.728236l0 417.041635q0 14.728236-14.728236 14.728236l0 0q-14.728236 0-14.728237-14.728236l0-417.041635q0-14.728236 14.728237-14.728236Z" fill="#29C8EB" p-id="1831"></path></svg>


--------------------------------------------------------------------------------
/themes/svg/doc.svg:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <svg width="1024" height="1024" xmlns="http://www.w3.org/2000/svg" xmlns:svg="http://www.w3.org/2000/svg" class="icon" version="1.1">
3 |  <g class="layer">
4 |   <title>Layer 1</title>
5 |   <path d="m298.67,96a32,32 0 0 1 0,64a138.67,138.67 0 0 0 -138.67,138.67l0,42.66a32,32 0 0 1 -64,0l0,-42.66a202.67,202.67 0 0 1 202.67,-202.67zm597.33,554.67a32,32 0 0 1 32,32l0,42.66a202.67,202.67 0 0 1 -202.67,202.67l-42.66,0a32,32 0 0 1 0,-64l42.66,0a138.67,138.67 0 0 0 138.67,-138.67l0,-42.66a32,32 0 0 1 32,-32zm-128,-405.34a32,32 0 0 1 0,64l-213.33,0a32,32 0 0 1 0,-64l213.33,0zm0,128a32,32 0 0 1 0,64l-128,0a32,32 0 0 1 0,-64l128,0z" fill="#00aeff" id="svg_1"/>
6 |   <path d="m780.8,96a138.67,138.67 0 0 1 138.67,138.67l0,213.33a138.67,138.67 0 0 1 -138.67,138.67l-98.13,0a32,32 0 0 1 0,-64l98.13,0a74.67,74.67 0 0 0 74.67,-74.67l0,-213.33a74.67,74.67 0 0 0 -74.67,-74.67l-247.47,0a74.67,74.67 0 0 0 -74.66,74.67l0,106.66a32,32 0 0 1 -64,0l0,-106.66a138.67,138.67 0 0 1 138.66,-138.67l247.47,0zm-487.68,500.05a32,32 0 0 1 45.23,0l64,64a32,32 0 0 1 0,45.23l-64,64a32,32 0 0 1 -45.23,-45.23l41.34,-41.38l-41.38,-41.39a32,32 0 0 1 -3.07,-41.64l3.11,-3.59z" fill="#00aeff" id="svg_2"/>
7 |   <path d="m448,437.33a138.67,138.67 0 0 1 138.67,138.67l0,213.33a138.67,138.67 0 0 1 -138.67,138.67l-213.33,0a138.67,138.67 0 0 1 -138.67,-138.67l0,-213.33a138.67,138.67 0 0 1 138.67,-138.67l213.33,0zm0,64l-213.33,0a74.67,74.67 0 0 0 -74.67,74.67l0,213.33c0,41.22 33.45,74.67 74.67,74.67l213.33,0a74.67,74.67 0 0 0 74.67,-74.67l0,-213.33a74.67,74.67 0 0 0 -74.67,-74.67z" fill="#00aeff" id="svg_3"/>
8 |  </g>
9 | </svg>


--------------------------------------------------------------------------------
/themes/svg/img.svg:
--------------------------------------------------------------------------------
 1 | <?xml version="1.0"?>
 2 | <svg width="1024" height="1024" xmlns="http://www.w3.org/2000/svg" xmlns:svg="http://www.w3.org/2000/svg" class="icon" version="1.1">
 3 |  <g class="layer">
 4 |   <title>Layer 1</title>
 5 |   <path d="m960,234l0,556c0,5.52 -4.48,10 -10,10l-438,0l0,-576l438,0c5.52,0 10,4.48 10,10z" fill="#F7FEFE" id="svg_1"/>
 6 |   <path d="m931.93,800l-419.93,0l0,-202.03l114.19,-114.19c5.21,-5.21 13.71,-5 18.66,0.44l287.08,315.78z" fill="#3404FC" id="svg_2"/>
 7 |   <path d="m512,800l256,0l-256,-275.69l0,275.69z" fill="#0097FF" id="svg_3"/>
 8 |   <path d="m848,336m-48,0a48,48 0 1 0 96,0a48,48 0 1 0 -96,0z" fill="#EC1C36" id="svg_4"/>
 9 |   <path d="m305.29,242.44m-32,0a32,32 0 1 0 64,0a32,32 0 1 0 -64,0z" fill="#00aeff" id="svg_5"/>
10 |   <path d="m112,320m-32,0a32,32 0 1 0 64,0a32,32 0 1 0 -64,0z" fill="#00aeff" id="svg_6"/>
11 |   <path d="m96,512m-32,0a32,32 0 1 0 64,0a32,32 0 1 0 -64,0z" fill="#00aeff" id="svg_7"/>
12 |   <path d="m305.29,781.56m-32,0a32,32 0 1 0 64,0a32,32 0 1 0 -64,0z" fill="#00aeff" id="svg_8"/>
13 |   <path d="m112,704m-32,0a32,32 0 1 0 64,0a32,32 0 1 0 -64,0z" fill="#00aeff" id="svg_9"/>
14 |   <path d="m950,816.31l-438,0l0,-32l432,0l0,-544.62l-432,0l0,-32l438,0c14.34,0 26,11.66 26,26l0,556.62c0,14.34 -11.66,26 -26,26zm-630,-192.31c8.84,0 16,-7.16 16,-16s-7.16,-16 -16,-16l-38.63,0l-96,96l-11.4,0c-7.12,-27.57 -32.21,-48 -61.97,-48c-35.29,0 -64,28.71 -64,64s28.71,64 64,64c29.77,0 54.85,-20.43 61.97,-48l24.65,0l96.01,-96l25.37,0zm-208,112c-17.67,0 -32,-14.33 -32,-32s14.33,-32 32,-32s32,14.33 32,32s-14.33,32 -32,32zm288,-304c8.84,0 16,-7.16 16,-16s-7.16,-16 -16,-16l-32,0c-8.84,0 -16,7.16 -16,16s7.16,16 16,16l32,0zm-80,80c0,8.84 7.16,16 16,16l32,0c8.84,0 16,-7.16 16,-16s-7.16,-16 -16,-16l-32,0c-8.84,0 -16,7.16 -16,16zm32,96c0,8.84 7.16,16 16,16l32,0c8.84,0 16,-7.16 16,-16s-7.16,-16 -16,-16l-32,0c-8.84,0 -16,7.16 -16,16zm-240,-224c29.77,0 54.85,-20.43 61.97,-48l11.4,0l96,96l38.63,0c8.84,0 16,-7.16 16,-16s-7.16,-16 -16,-16l-25.37,0l-96,-96l-24.65,0c-7.12,-27.57 -32.21,-48 -61.97,-48c-35.29,0 -64,28.71 -64,64s28.7,64 63.99,64zm0,-96c17.67,0 32,14.33 32,32s-14.33,32 -32,32s-32,-14.33 -32,-32s14.33,-32 32,-32zm-16,288c29.77,0 54.85,-20.43 61.97,-48l133.43,0c6.96,0 12.59,-7.16 12.59,-16s-5.64,-16 -12.59,-16l-133.43,0c-7.12,-27.57 -32.21,-48 -61.97,-48c-35.29,0 -64,28.71 -64,64s28.71,64 64,64zm0,-96c17.67,0 32,14.33 32,32s-14.33,32 -32,32s-32,-14.33 -32,-32s14.33,-32 32,-32zm384,-416c-8.84,0 -16,7.16 -16,16l0,224l-73.37,0l-29.8,-29.79a63.62,63.62 0 0 0 8.46,-31.77c0,-35.29 -28.71,-64 -64,-64s-64,28.71 -64,64s28.71,64 64,64c12.16,0 23.53,-3.41 33.21,-9.31l38.87,38.87l86.63,0l0,64l-16,0c-8.84,0 -16,7.16 -16,16s7.16,16 16,16l16,0l0,64l-48,0c-8.84,0 -16,7.16 -16,16s7.16,16 16,16l48,0l0,64l-16,0c-8.84,0 -16,7.16 -16,16s7.16,16 16,16l16,0l0,64l-86.63,0l-38.87,38.87a63.61,63.61 0 0 0 -33.21,-9.31c-35.29,0 -64,28.71 -64,64s28.71,64 64,64s64,-28.71 64,-64c0,-11.55 -3.08,-22.4 -8.46,-31.77l29.8,-29.79l73.37,0l0,224c0,8.84 7.16,16 16,16s16,-7.16 16,-16l0,-864c0,-8.84 -7.16,-16 -16,-16zm-143.57,185.81c-2.62,11.14 -11.07,20.03 -21.95,23.29c-2.91,0.87 -6,1.34 -9.19,1.34c-17.68,0 -32,-14.33 -32,-32c0,-17.68 14.32,-32 32,-32c17.67,0 32,14.32 32,32c0,2.54 -0.29,5 -0.86,7.37zm-31.14,563.75c-17.68,0 -32,-14.32 -32,-32c0,-17.67 14.32,-32 32,-32c3.19,0 6.28,0.47 9.19,1.34c10.88,3.26 19.33,12.15 21.95,23.29c0.57,2.37 0.86,4.83 0.86,7.37c0,17.68 -14.33,32 -32,32z" fill="#00aeff" id="svg_10"/>
15 |  </g>
16 | </svg>


--------------------------------------------------------------------------------
/themes/svg/location.svg:
--------------------------------------------------------------------------------
1 | <svg t="1722787072441" class="icon" viewBox="0 0 1024 1024" version="1.1" xmlns="http://www.w3.org/2000/svg" p-id="8044" width="200" height="200"><path d="M800.914286 585.142857c78.750476 0 142.628571 62.049524 142.628571 138.605714l-0.097524 4.876191c-1.755429 45.83619-27.794286 97.328762-70.582857 149.577143-15.969524 19.504762-33.133714 37.692952-50.273524 54.02819l-8.630857 8.045715a517.12 517.12 0 0 1-4.047238 3.657142l-8.996571 7.850667-5.168762-4.437333a602.063238 602.063238 0 0 1-66.779429-69.144381c-42.76419-52.248381-68.827429-103.740952-70.582857-149.552762l-0.073143-2.438095-0.024381-2.438096C658.285714 647.192381 722.16381 585.142857 800.914286 585.142857zM121.904762 330.947048l359.619048 154.575238v451.779047L165.351619 796.94019A73.142857 73.142857 0 0 1 121.904762 730.063238v-399.11619z m780.190476-9.337905l0.024381 243.126857A193.26781 193.26781 0 0 0 800.914286 536.380952C695.515429 536.380952 609.52381 619.958857 609.52381 723.748571l0.121904 6.290286c1.852952 48.444952 22.893714 99.035429 59.928381 150.820572l-114.907428 51.029333V481.694476L902.095238 321.584762zM800.914286 658.285714c-37.400381 0-67.193905 27.623619-69.36381 61.537524l-0.121905 3.925333 0.048762 2.072381c0.975238 25.770667 18.919619 63.097905 54.076953 106.057143 4.924952 5.997714 10.044952 11.946667 15.36 17.846857l7.801904-8.899047 7.558096-8.972191c33.304381-40.667429 51.151238-76.312381 53.784381-101.863619l0.292571-4.144762 0.048762-2.096762c0-35.669333-30.646857-65.462857-69.485714-65.462857z m0 28.745143a40.740571 40.740571 0 1 1 0 81.481143 40.740571 40.740571 0 0 1 0-81.481143zM541.696 86.308571l316.952381 140.751239c11.02019 4.87619 20.406857 12.312381 27.599238 21.382095l-371.151238 171.885714-382.537143-164.425143c7.558095-12.483048 18.895238-22.674286 32.792381-28.842666l316.952381-140.751239a73.142857 73.142857 0 0 1 59.392 0z" p-id="8045" fill="#1296db"></path></svg>


--------------------------------------------------------------------------------
/themes/svg/mm.svg:
--------------------------------------------------------------------------------
 1 | <?xml version="1.0"?>
 2 | <svg width="1066" height="1024" xmlns="http://www.w3.org/2000/svg" xmlns:svg="http://www.w3.org/2000/svg" class="icon" version="1.1">
 3 |  <g class="layer">
 4 |   <title>Layer 1</title>
 5 |   <path d="m443.9,511.45a96,96 0 1 0 192,0a96,96 0 0 0 -192,0zm96,32a32,32 0 1 1 0,-64a32,32 0 0 1 0,64z" fill="#00aeff" id="svg_1"/>
 6 |   <path d="m74.67,512c0,80.17 65.87,142.08 147.54,181.67c84.26,40.84 198.06,65.07 321.7,65.07c123.74,0 237.49,-24.23 321.75,-65.07c81.71,-39.63 147.59,-101.54 147.59,-181.71c0,-80.22 -65.88,-142.08 -147.59,-181.68c-84.26,-40.87 -198.05,-65.11 -321.7,-65.11c-123.69,0 -237.49,24.24 -321.71,65.11c-81.71,39.6 -147.58,101.51 -147.58,181.68l0,0.04zm469.29,172.07c-114.9,0 -217.09,-22.61 -289.15,-57.6c-74.67,-36.18 -105.48,-79.01 -105.48,-114.47c0,-35.46 30.85,-78.29 105.48,-114.52c72.06,-34.94 174.25,-57.6 289.15,-57.6c114.9,0 217.09,22.66 289.15,57.6c74.67,36.23 105.47,79.06 105.47,114.52c0,35.5 -30.85,78.34 -105.47,114.52c-72.11,34.98 -174.25,57.6 -289.15,57.6l0,-0.05z" fill="#00aeff" id="svg_2"/>
 7 |   <path d="m300.2,705.19c-5.97,82.74 15.7,130.86 46.42,148.57c30.72,17.75 83.25,12.46 151.9,-34.09c66.3,-44.93 137.04,-122.07 194.47,-221.57c57.47,-99.5 88.92,-199.34 94.72,-279.21c5.98,-82.77 -15.74,-130.86 -46.46,-148.61c-30.72,-17.75 -83.2,-12.46 -151.9,34.09c-66.3,44.93 -137.04,122.12 -194.47,221.61c-57.43,99.5 -88.92,199.3 -94.72,279.21l0.04,0zm-74.49,-5.37c6.78,-93.44 42.66,-204.08 104.53,-311.17c61.82,-107.09 139.69,-193.54 217.17,-246.1c75.18,-50.94 161.71,-77.01 231.17,-36.95c69.46,40.11 90.11,128.09 83.59,218.67c-6.75,93.39 -42.67,204.07 -104.54,311.16c-61.82,107.1 -139.69,193.5 -217.17,246.06c-75.18,50.95 -161.71,77.06 -231.17,36.95c-69.46,-40.1 -90.11,-128.08 -83.58,-218.62z" fill="#00aeff" id="svg_3"/>
 8 |   <path d="m300.2,318.85c5.76,79.87 37.21,179.71 94.68,279.21c57.43,99.5 128.17,176.64 194.43,221.61c68.7,46.51 121.18,51.84 151.9,34.09c30.72,-17.75 52.43,-65.88 46.46,-148.61c-5.76,-79.87 -37.25,-179.71 -94.72,-279.21c-57.43,-99.5 -128.13,-176.64 -194.43,-221.61c-68.7,-46.51 -121.18,-51.8 -151.9,-34.09c-30.72,17.75 -52.43,65.88 -46.42,148.61zm-74.49,5.37c-6.53,-90.53 14.12,-178.51 83.58,-218.62c69.46,-40.11 155.99,-13.99 231.13,36.95c77.52,52.52 155.39,138.96 217.21,246.06c61.87,107.09 97.75,217.77 104.54,311.17c6.52,90.53 -14.17,178.51 -83.63,218.62c-69.42,40.11 -155.95,13.99 -231.08,-36.95c-77.53,-52.52 -155.44,-138.96 -217.26,-246.06c-61.83,-107.09 -97.71,-217.77 -104.49,-311.17z" fill="#00aeff" id="svg_4"/>
 9 |  </g>
10 | </svg>


--------------------------------------------------------------------------------
/themes/svg/polish.svg:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <svg width="1024" height="1024" xmlns="http://www.w3.org/2000/svg" xmlns:svg="http://www.w3.org/2000/svg" class="icon" version="1.1">
3 |  <g class="layer">
4 |   <title>Layer 1</title>
5 |   <path d="m671.27,337.36l2.05,1.92a174.18,174.18 0 0 1 0.19,244.18l-383.3,390.22a168.04,168.04 0 0 1 -237.73,1.79l-2.04,-2.05a174.18,174.18 0 0 1 -0.26,-244.12l383.3,-390.15a168.04,168.04 0 0 1 237.73,-1.79l0.06,0zm-165.09,73.2l-383.31,390.22a72.31,72.31 0 0 0 0.07,101.36l0.77,0.83a66.29,66.29 0 0 0 93.87,-0.7l383.3,-390.22a72.31,72.31 0 0 0 0,-101.42l-0.83,-0.77a66.36,66.36 0 0 0 -93.87,0.7zm282.32,209.7a47.35,47.35 0 0 1 0.64,0.45l122.48,72.05c23.04,13.57 30.91,43.07 17.79,66.29a47.35,47.35 0 0 1 -64.63,17.92l-0.58,-0.45l-122.48,-72.05a48.95,48.95 0 0 1 -17.78,-66.29a47.35,47.35 0 0 1 64.63,-17.92l-0.07,0zm187.43,-191.84a48.38,48.38 0 0 1 0,96.69l-140.52,0a48.38,48.38 0 0 1 0,-96.69l140.52,0zm-49.27,-292.63l0.64,0.64a48.82,48.82 0 0 1 0,68.4l-100.66,102.58a46.97,46.97 0 0 1 -66.42,0.64l-0.64,-0.64a48.82,48.82 0 0 1 0,-68.41l100.66,-102.57a46.97,46.97 0 0 1 66.42,-0.64zm-632.55,-35.64a46.97,46.97 0 0 1 0.58,0.63l100.65,102.52a48.82,48.82 0 0 1 0,68.47a46.97,46.97 0 0 1 -66.42,0.64l-0.64,-0.64l-100.65,-102.58a48.82,48.82 0 0 1 0,-68.47a46.97,46.97 0 0 1 66.48,-0.57zm284.57,-100.15c26.23,0 47.48,21.24 47.48,47.48l0,146.92a47.42,47.42 0 1 1 -94.9,0l0,-146.92c0,-26.24 21.31,-47.48 47.42,-47.48z" fill="#00aeff" id="svg_1"/>
6 |  </g>
7 | </svg>


--------------------------------------------------------------------------------
/themes/svg/tts.svg:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <svg width="1092" height="1024" xmlns="http://www.w3.org/2000/svg" xmlns:svg="http://www.w3.org/2000/svg" class="icon" version="1.1">
3 |  <g class="layer">
4 |   <title>Layer 1</title>
5 |   <path d="m1010.55,424.93c0,86.01 -65.36,155.88 -147.04,166.63l-43.63,0c-81.68,-10.75 -147.04,-80.62 -147.04,-166.63l-92.57,0c0,123.63 92.6,231.11 212.41,252.62l0,107.52l87.17,0l0,-107.52c119.81,-21.51 212.42,-123.63 212.42,-252.59l-81.72,0l0,-0.03zm-76.25,-231.16c0,-53.76 -43.56,-91.37 -92.57,-91.37a91.2,91.2 0 0 0 -92.61,91.37l0,91.38l190.64,0l0,-91.38l-5.46,0zm-190.64,231.16c0,53.76 43.59,91.37 92.61,91.37a91.2,91.2 0 0 0 92.6,-91.37l0,-91.38l-185.21,0l0,91.38zm-279.45,-274.23l-139.94,-140.46l-6.83,-6.83l-3.41,-3.41l-3.42,3.41l-6.82,6.86l-20.48,20.55l-3.42,3.42l3.42,6.86l13.65,13.68l75.09,75.37l-153.6,0c-122.88,6.83 -218.45,109.57 -218.45,229.44l0,10.28l51.2,0l0,-10.24c0,-92.5 75.09,-171.28 167.25,-178.11l153.6,0l-75.09,75.33l-10.24,10.28l-3.41,6.82l-3.42,3.45l3.42,3.41l27.3,27.41l3.42,3.42l3.41,-3.42l30.72,-30.82l116.05,-116.43l3.42,-3.41l-3.42,-6.86zm-406.18,383.55l0,130.16l64.85,0l0,-65.06l129.71,0l0,359.59l-64.86,0l0,65.06l194.56,0l0,-65.06l-64.85,0l0,-359.59l129.71,0l0,65.06l64.85,0l0,-130.16l-453.97,0z" fill="#00aeff" id="svg_1"/>
6 |  </g>
7 | </svg>


--------------------------------------------------------------------------------
/themes/svg/vt.svg:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <svg width="1024" height="1024" xmlns="http://www.w3.org/2000/svg" xmlns:svg="http://www.w3.org/2000/svg" class="icon" version="1.1">
3 |  <g class="layer">
4 |   <title>Layer 1</title>
5 |   <path d="m256,64a192,192 0 0 0 -192,192l0,512a192,192 0 0 0 192,192l512,0a192,192 0 0 0 192,-192l0,-512a192,192 0 0 0 -192,-192l-512,0zm92.16,416l-158.08,-158.08l67.84,-67.9l169.41,169.34a80,80 0 0 1 0,113.15l-169.41,169.35l-67.84,-67.78l158.08,-158.08zm131.84,160l288,0l0,96l-288,0l0,-96z" fill="#00aeff" id="svg_1"/>
6 |   <path d="m190.08,638.02l158.08,-158.08l-158.08,-158.08l67.84,-67.84l169.41,169.34a80,80 0 0 1 0,113.15l-169.41,169.35l-67.84,-67.78l0,-0.06zm577.92,1.92l-288,0l0,96l288,0l0,-95.94l0,-0.06z" fill="#2951E0" id="svg_2" opacity="0.2"/>
7 |  </g>
8 | </svg>


--------------------------------------------------------------------------------
/themes/theme.js:
--------------------------------------------------------------------------------
 1 | async function try_load_previous_theme(){
 2 |     if (getCookie("js_theme_selection_cookie")) {
 3 |         let theme_selection = getCookie("js_theme_selection_cookie");
 4 |         let css = localStorage.getItem('theme-' + theme_selection);
 5 |         if (css) {
 6 |             change_theme(theme_selection, css);
 7 |         }
 8 |     }
 9 | }
10 | 
11 | async function change_theme(theme_selection, css) {
12 |     if (theme_selection.length === 0) {
13 |         try_load_previous_theme();
14 |         return;
15 |     }
16 | 
17 |     // Remove theme styles injected by Gradio itself
18 |     var existingStyles = document.querySelectorAll("body > gradio-app > div > style");
19 |     for (var i = 0; i < existingStyles.length; i++) {
20 |         existingStyles[i].parentNode.removeChild(existingStyles[i]);
21 |     }
22 |     // Remove theme styles injected by a previous change_theme() call
23 |     existingStyles = document.querySelectorAll("style[data-loaded-css]");
24 |     for (var i = 0; i < existingStyles.length; i++) {
25 |         existingStyles[i].parentNode.removeChild(existingStyles[i]);
26 |     }
27 | 
28 |     setCookie("js_theme_selection_cookie", theme_selection, 3);
29 |     localStorage.setItem('theme-' + theme_selection, css);
30 | 
31 |     var styleElement = document.createElement('style');
32 |     styleElement.setAttribute('data-loaded-css', 'placeholder');
33 |     styleElement.innerHTML = css;
34 |     document.body.appendChild(styleElement);
35 | }
36 | 
37 | 
38 | // // Record the most recent theme switch
39 | // async function change_theme_prepare(theme_selection, secret_css) {
40 | //     setCookie("js_theme_selection_cookie", theme_selection, 3);
41 | // }
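
getCookie and setCookie are not defined in this file; they are presumably supplied by another script in the theme bundle. A minimal sketch of compatible helpers, assuming standard document.cookie semantics and that setCookie's third argument is an expiry in days:

// Sketch only; the real helpers live elsewhere in the repository.
function setCookie(name, value, days) {
    const expires = new Date(Date.now() + days * 864e5).toUTCString();
    document.cookie = name + "=" + encodeURIComponent(value) +
        "; expires=" + expires + "; path=/";
}
function getCookie(name) {
    const match = document.cookie.match(new RegExp("(?:^|; )" + name + "=([^;]*)"));
    return match ? decodeURIComponent(match[1]) : "";   // "" is falsy, as try_load_previous_theme expects
}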


--------------------------------------------------------------------------------
/themes/waifu_plugin/autoload.js:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/binary-husky/gpt_academic/a7a56b5058fc8e69641e113f615aed8ab3a59a64/themes/waifu_plugin/autoload.js


--------------------------------------------------------------------------------
/themes/waifu_plugin/flat-ui-icons-regular.eot:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/binary-husky/gpt_academic/a7a56b5058fc8e69641e113f615aed8ab3a59a64/themes/waifu_plugin/flat-ui-icons-regular.eot


--------------------------------------------------------------------------------
/themes/waifu_plugin/flat-ui-icons-regular.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/binary-husky/gpt_academic/a7a56b5058fc8e69641e113f615aed8ab3a59a64/themes/waifu_plugin/flat-ui-icons-regular.ttf


--------------------------------------------------------------------------------
/themes/waifu_plugin/flat-ui-icons-regular.woff:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/binary-husky/gpt_academic/a7a56b5058fc8e69641e113f615aed8ab3a59a64/themes/waifu_plugin/flat-ui-icons-regular.woff


--------------------------------------------------------------------------------
/themes/waifu_plugin/source:
--------------------------------------------------------------------------------
1 | https://github.com/fghrsh/live2d_demo
2 | 


--------------------------------------------------------------------------------
/version:
--------------------------------------------------------------------------------
1 | {
2 |   "version": 3.93,
3 |   "show_feature": true,
4 |   "new_feature": "Support for deepseek-reason (r1) <-> Custom font and font size <-> Front-end optimizations and TTS bug fixes <-> New timeline rewind feature <-> Support for chatgpt-4o-latest <-> New RAG component <-> Upgraded all-in-one main submit button"
5 | }
6 | 
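
A hedged sketch of how a client might poll this manifest for an update notice; the raw-GitHub URL follows the pattern of the other raw links in this dump and is an assumption, not a documented endpoint:

// Hypothetical update check against the /version manifest above.
const VERSION_URL = "https://raw.githubusercontent.com/binary-husky/gpt_academic/master/version";
fetch(VERSION_URL)
    .then(resp => resp.json())
    .then(manifest => {
        if (manifest.show_feature) {
            console.log(`v${manifest.version}: ${manifest.new_feature}`);
        }
    });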


--------------------------------------------------------------------------------