├── .circleci ├── config.yml ├── continue_config.yml └── grab_notary_id.py ├── .codespellrc ├── .github ├── ISSUE_TEMPLATE │ ├── bindings-bug.md │ ├── chat-bug.md │ ├── config.yml │ ├── documentation.md │ ├── feature-request.md │ └── other-bug.md ├── pull_request_template.md └── workflows │ ├── close_issues.yml │ └── codespell.yml ├── .gitignore ├── .gitmodules ├── CONTRIBUTING.md ├── LICENSE.txt ├── MAINTAINERS.md ├── README.md ├── common └── common.cmake ├── gpt4all-backend ├── CMakeLists.txt ├── README.md ├── include │ └── gpt4all-backend │ │ ├── llmodel.h │ │ ├── llmodel_c.h │ │ └── sysinfo.h ├── llama.cpp.cmake └── src │ ├── dlhandle.cpp │ ├── dlhandle.h │ ├── llamamodel.cpp │ ├── llamamodel_impl.h │ ├── llmodel.cpp │ ├── llmodel_c.cpp │ ├── llmodel_shared.cpp │ └── utils.h ├── gpt4all-bindings ├── README.md ├── cli │ ├── README.md │ ├── app.py │ └── developer_notes.md ├── python │ ├── .gitignore │ ├── .isort.cfg │ ├── CHANGELOG.md │ ├── LICENSE.txt │ ├── MANIFEST.in │ ├── README.md │ ├── docs │ │ ├── assets │ │ │ ├── add.png │ │ │ ├── add_model_gpt4.png │ │ │ ├── attach_spreadsheet.png │ │ │ ├── baelor.png │ │ │ ├── before_first_chat.png │ │ │ ├── chat_window.png │ │ │ ├── closed_chat_panel.png │ │ │ ├── configure_doc_collection.png │ │ │ ├── disney_spreadsheet.png │ │ │ ├── download.png │ │ │ ├── download_llama.png │ │ │ ├── explore.png │ │ │ ├── explore_models.png │ │ │ ├── favicon.ico │ │ │ ├── good_tyrion.png │ │ │ ├── got_docs_ready.png │ │ │ ├── got_done.png │ │ │ ├── gpt4all_home.png │ │ │ ├── gpt4all_xlsx_attachment.mp4 │ │ │ ├── installed_models.png │ │ │ ├── linux.png │ │ │ ├── local_embed.gif │ │ │ ├── mac.png │ │ │ ├── models_page_icon.png │ │ │ ├── new_docs_annotated.png │ │ │ ├── new_docs_annotated_filled.png │ │ │ ├── new_first_chat.png │ │ │ ├── no_docs.png │ │ │ ├── no_models.png │ │ │ ├── no_models_tiny.png │ │ │ ├── nomic.png │ │ │ ├── obsidian_adding_collection.png │ │ │ ├── obsidian_docs.png │ │ │ ├── obsidian_response.png │ │ │ ├── obsidian_sources.png │ │ │ ├── open_chat_panel.png │ │ │ ├── open_local_docs.png │ │ │ ├── open_sources.png │ │ │ ├── osbsidian_user_interaction.png │ │ │ ├── search_mistral.png │ │ │ ├── search_settings.png │ │ │ ├── spreadsheet_chat.png │ │ │ ├── syrio_snippets.png │ │ │ ├── three_model_options.png │ │ │ ├── ubuntu.svg │ │ │ └── windows.png │ │ ├── css │ │ │ └── custom.css │ │ ├── gpt4all_api_server │ │ │ └── home.md │ │ ├── gpt4all_desktop │ │ │ ├── chat_templates.md │ │ │ ├── chats.md │ │ │ ├── cookbook │ │ │ │ ├── use-local-ai-models-to-privately-chat-with-Obsidian.md │ │ │ │ ├── use-local-ai-models-to-privately-chat-with-One-Drive.md │ │ │ │ ├── use-local-ai-models-to-privately-chat-with-google-drive.md │ │ │ │ └── use-local-ai-models-to-privately-chat-with-microsoft-excel.md │ │ │ ├── localdocs.md │ │ │ ├── models.md │ │ │ ├── quickstart.md │ │ │ └── settings.md │ │ ├── gpt4all_help │ │ │ ├── faq.md │ │ │ └── troubleshooting.md │ │ ├── gpt4all_python │ │ │ ├── home.md │ │ │ ├── monitoring.md │ │ │ └── ref.md │ │ ├── index.md │ │ └── old │ │ │ ├── gpt4all_chat.md │ │ │ ├── gpt4all_cli.md │ │ │ ├── gpt4all_faq.md │ │ │ ├── gpt4all_monitoring.md │ │ │ ├── gpt4all_nodejs.md │ │ │ ├── gpt4all_python.md │ │ │ ├── gpt4all_python_embedding.md │ │ │ └── index.md │ ├── gpt4all │ │ ├── __init__.py │ │ ├── _pyllmodel.py │ │ ├── gpt4all.py │ │ └── tests │ │ │ ├── __init__.py │ │ │ ├── test_embed_timings.py │ │ │ └── test_gpt4all.py │ ├── makefile │ ├── mkdocs.yml │ └── setup.py └── typescript │ ├── .clang-format │ ├── .gitignore │ ├── 
.npmignore │ ├── .yarnrc.yml │ ├── README.md │ ├── binding.ci.gyp │ ├── binding.gyp │ ├── index.cc │ ├── index.h │ ├── package.json │ ├── prompt.cc │ ├── prompt.h │ ├── scripts │ ├── build.js │ ├── build_mingw.ps1 │ ├── build_msvc.bat │ ├── build_unix.sh │ ├── docs.js │ ├── mkclangd.js │ └── prebuild.js │ ├── spec │ ├── callbacks.mjs │ ├── chat-memory.mjs │ ├── chat-minimal.mjs │ ├── concurrency.mjs │ ├── embed-jsonl.mjs │ ├── embed.mjs │ ├── llmodel.mjs │ ├── long-context.mjs │ ├── model-switching.mjs │ ├── stateless.mjs │ ├── streaming.mjs │ └── system.mjs │ ├── src │ ├── chat-session.js │ ├── config.js │ ├── gpt4all.d.ts │ ├── gpt4all.js │ ├── models.js │ └── util.js │ ├── test │ ├── gpt4all.test.js │ └── models.json │ └── yarn.lock ├── gpt4all-chat ├── .flake8 ├── CHANGELOG.md ├── CMakeLists.txt ├── LICENSE ├── build_and_run.md ├── cmake │ ├── Modules │ │ ├── SignMacOSBinaries.cmake │ │ └── SignWindowsBinaries.cmake │ ├── cpack-steal-config.cmake.in │ ├── cpack_config.cmake │ ├── deploy-qt-linux.cmake.in │ ├── deploy-qt-mac.cmake.in │ ├── deploy-qt-windows.cmake.in │ ├── download_model.cmake │ ├── installer_control.qs │ ├── installer_gpt4all_component.qs │ ├── installer_maintenancetool_component.qs │ └── sign_dmg.py ├── contributing_translations.md ├── deps │ └── CMakeLists.txt ├── dev-requirements.txt ├── flatpak-manifest │ ├── io.gpt4all.gpt4all.appdata.xml │ ├── io.gpt4all.gpt4all.desktop │ └── screenshots │ │ ├── chat.png │ │ ├── models.png │ │ └── welcome.png ├── icons │ ├── antenna_1.svg │ ├── antenna_2.svg │ ├── antenna_3.svg │ ├── caret_down.svg │ ├── caret_right.svg │ ├── changelog.svg │ ├── chat.svg │ ├── check.svg │ ├── close.svg │ ├── copy.svg │ ├── db.svg │ ├── discord.svg │ ├── download.svg │ ├── edit.svg │ ├── eject.svg │ ├── email.svg │ ├── file-doc.svg │ ├── file-docx.svg │ ├── file-md.svg │ ├── file-pdf.svg │ ├── file-txt.svg │ ├── file-xls.svg │ ├── file.svg │ ├── github.svg │ ├── globe.svg │ ├── gpt4all-32.png │ ├── gpt4all-48.png │ ├── gpt4all.svg │ ├── gpt4all_transparent.svg │ ├── groq.svg │ ├── home.svg │ ├── image.svg │ ├── info.svg │ ├── left_panel_closed.svg │ ├── left_panel_open.svg │ ├── local-docs.svg │ ├── mistral.svg │ ├── models.svg │ ├── network.svg │ ├── nomic_logo.svg │ ├── notes.svg │ ├── openai.svg │ ├── paperclip.svg │ ├── plus.svg │ ├── plus_circle.svg │ ├── question.svg │ ├── recycle.svg │ ├── redo.svg │ ├── regenerate.svg │ ├── search.svg │ ├── send_message.svg │ ├── settings.svg │ ├── stack.svg │ ├── stop_generating.svg │ ├── thumbs_down.svg │ ├── thumbs_up.svg │ ├── trash.svg │ ├── twitter.svg │ ├── undo.svg │ ├── up_down.svg │ ├── webpage.svg │ └── you.svg ├── main.qml ├── metadata │ ├── latestnews.md │ ├── models.json │ ├── models2.json │ ├── models3.json │ └── release.json ├── pyproject.toml ├── qa_checklist.md ├── qml │ ├── AddCollectionView.qml │ ├── AddGPT4AllModelView.qml │ ├── AddHFModelView.qml │ ├── AddModelView.qml │ ├── AddRemoteModelView.qml │ ├── ApplicationSettings.qml │ ├── ChatCollapsibleItem.qml │ ├── ChatDrawer.qml │ ├── ChatItemView.qml │ ├── ChatMessageButton.qml │ ├── ChatTextItem.qml │ ├── ChatView.qml │ ├── CollectionsDrawer.qml │ ├── ConfirmationDialog.qml │ ├── HomeView.qml │ ├── LocalDocsSettings.qml │ ├── LocalDocsView.qml │ ├── ModelSettings.qml │ ├── ModelsView.qml │ ├── MyBusyIndicator.qml │ ├── MyButton.qml │ ├── MyCheckBox.qml │ ├── MyComboBox.qml │ ├── MyDialog.qml │ ├── MyDirectoryField.qml │ ├── MyFancyLink.qml │ ├── MyFileDialog.qml │ ├── MyFileIcon.qml │ ├── MyFolderDialog.qml │ ├── MyMenu.qml │ ├── 
MyMenuItem.qml │ ├── MyMiniButton.qml │ ├── MySettingsButton.qml │ ├── MySettingsDestructiveButton.qml │ ├── MySettingsLabel.qml │ ├── MySettingsStack.qml │ ├── MySettingsTab.qml │ ├── MySlug.qml │ ├── MyTabButton.qml │ ├── MyTextArea.qml │ ├── MyTextButton.qml │ ├── MyTextField.qml │ ├── MyToolButton.qml │ ├── MyWelcomeButton.qml │ ├── NetworkDialog.qml │ ├── NewVersionDialog.qml │ ├── PopupDialog.qml │ ├── RemoteModelCard.qml │ ├── SettingsView.qml │ ├── StartupDialog.qml │ ├── Theme.qml │ ├── ThumbsDownDialog.qml │ ├── Toast.qml │ └── ToastManager.qml ├── resources │ ├── gpt4all.icns │ ├── gpt4all.ico │ └── gpt4all.rc ├── src │ ├── chat.cpp │ ├── chat.h │ ├── chatapi.cpp │ ├── chatapi.h │ ├── chatlistmodel.cpp │ ├── chatlistmodel.h │ ├── chatllm.cpp │ ├── chatllm.h │ ├── chatmodel.cpp │ ├── chatmodel.h │ ├── chatviewtextprocessor.cpp │ ├── chatviewtextprocessor.h │ ├── codeinterpreter.cpp │ ├── codeinterpreter.h │ ├── config.h.in │ ├── database.cpp │ ├── database.h │ ├── download.cpp │ ├── download.h │ ├── embllm.cpp │ ├── embllm.h │ ├── jinja_helpers.cpp │ ├── jinja_helpers.h │ ├── jinja_replacements.cpp │ ├── jinja_replacements.h │ ├── llm.cpp │ ├── llm.h │ ├── localdocs.cpp │ ├── localdocs.h │ ├── localdocsmodel.cpp │ ├── localdocsmodel.h │ ├── logger.cpp │ ├── logger.h │ ├── macosdock.h │ ├── macosdock.mm │ ├── main.cpp │ ├── modellist.cpp │ ├── modellist.h │ ├── mysettings.cpp │ ├── mysettings.h │ ├── network.cpp │ ├── network.h │ ├── server.cpp │ ├── server.h │ ├── tool.cpp │ ├── tool.h │ ├── toolcallparser.cpp │ ├── toolcallparser.h │ ├── toolmodel.cpp │ ├── toolmodel.h │ ├── utils.h │ ├── utils.inl │ ├── xlsxtomd.cpp │ └── xlsxtomd.h ├── system_requirements.md ├── test-requirements.txt ├── tests │ ├── CMakeLists.txt │ ├── cpp │ │ ├── basic_test.cpp │ │ └── test_main.cpp │ └── python │ │ ├── __init__.py │ │ ├── config.py.in │ │ └── test_server_api.py └── translations │ ├── gpt4all_en_US.ts │ ├── gpt4all_es_MX.ts │ ├── gpt4all_it_IT.ts │ ├── gpt4all_pt_BR.ts │ ├── gpt4all_ro_RO.ts │ ├── gpt4all_zh_CN.ts │ └── gpt4all_zh_TW.ts ├── gpt4all-lora-demo.gif ├── gpt4all-training ├── GPT-J_MAP.md ├── README.md ├── TRAINING_LOG.md ├── build_map.py ├── clean.py ├── configs │ ├── deepspeed │ │ ├── ds_config.json │ │ ├── ds_config_gptj.json │ │ ├── ds_config_gptj_lora.json │ │ ├── ds_config_mpt.json │ │ └── ds_config_pythia.json │ ├── eval │ │ ├── generate_baseline.yaml │ │ ├── generate_gpt4all_gptj.yaml │ │ ├── generate_gpt4all_gptj_lora.yaml │ │ └── generate_gpt4all_llama_lora.yaml │ ├── generate │ │ ├── generate.yaml │ │ ├── generate_gptj.yaml │ │ ├── generate_gptj_lora.yaml │ │ └── generate_llama.yaml │ ├── inference │ │ └── gptj.yaml │ └── train │ │ ├── finetune.yaml │ │ ├── finetune_falcon.yaml │ │ ├── finetune_gptj.yaml │ │ ├── finetune_gptj_lora.yaml │ │ ├── finetune_lora.yaml │ │ ├── finetune_mpt.yaml │ │ └── finetune_openllama.yaml ├── create_hostname.sh ├── data.py ├── env.yaml ├── eval_figures.py ├── eval_self_instruct.py ├── figs │ ├── clustering_overfit.png │ ├── duplicate_loss.png │ ├── first_lora.png │ ├── overfit-gpt-j.png │ ├── perplexity_hist.png │ └── single_epoch.png ├── generate.py ├── inference.py ├── launcher.sh ├── old-README.md ├── read.py ├── requirements.txt └── train.py └── roadmap.md /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | version: 2.1 2 | setup: true 3 | orbs: 4 | path-filtering: circleci/path-filtering@1.3.0 5 | 6 | workflows: 7 | version: 2.1 8 | generate-config: 9 | jobs: 10 | - 
path-filtering/filter: 11 | filters: 12 | tags: 13 | only: 14 | - /.*/ 15 | base-revision: main 16 | config-path: .circleci/continue_config.yml 17 | mapping: | 18 | .circleci/.* run-all-workflows true 19 | gpt4all-backend/.* run-all-workflows true 20 | gpt4all-bindings/python/.* run-python-workflow true 21 | gpt4all-bindings/typescript/.* run-ts-workflow true 22 | gpt4all-chat/.* run-chat-workflow true 23 | -------------------------------------------------------------------------------- /.circleci/grab_notary_id.py: -------------------------------------------------------------------------------- 1 | import re 2 | import sys 3 | 4 | ID_REG = r"id: (.*)" 5 | 6 | def main() -> None: 7 | notary_log = sys.argv[1] 8 | with open(notary_log, "r") as f: 9 | notary_output = f.read() 10 | id_m = re.search(ID_REG, notary_output) 11 | if id_m: 12 | print(id_m.group(1)) 13 | else: 14 | raise RuntimeError("Unable to parse ID from notarization logs") 15 | 16 | if __name__ == "__main__": 17 | main() -------------------------------------------------------------------------------- /.codespellrc: -------------------------------------------------------------------------------- 1 | [codespell] 2 | ignore-words-list = blong, afterall, assistent, crasher, requestor 3 | skip = ./.git,./gpt4all-chat/translations,*.pdf,*.svg,*.lock 4 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bindings-bug.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: "\U0001F6E0 Bindings Bug Report" 3 | about: A bug report for the GPT4All Bindings 4 | labels: ["bindings", "bug-unconfirmed"] 5 | --- 6 | 7 | 8 | 9 | ### Bug Report 10 | 11 | 12 | 13 | ### Example Code 14 | 15 | 16 | 17 | ### Steps to Reproduce 18 | 19 | 20 | 21 | 1. 22 | 2. 23 | 3. 24 | 25 | ### Expected Behavior 26 | 27 | 28 | 29 | ### Your Environment 30 | 31 | - Bindings version (e.g. "Version" from `pip show gpt4all`): 32 | - Operating System: 33 | - Chat model used (if applicable): 34 | 35 | 36 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/chat-bug.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: "\U0001F4AC GPT4All Bug Report" 3 | about: A bug report for GPT4All Chat 4 | labels: ["chat", "bug-unconfirmed"] 5 | --- 6 | 7 | 8 | 9 | ### Bug Report 10 | 11 | 12 | 13 | ### Steps to Reproduce 14 | 15 | 16 | 17 | 1. 18 | 2. 19 | 3. 
20 | 21 | ### Expected Behavior 22 | 23 | 24 | 25 | ### Your Environment 26 | 27 | - GPT4All version: 28 | - Operating System: 29 | - Chat model used (if applicable): 30 | 31 | 32 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | version: 2.1 2 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/documentation.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: "\U0001F4C4 Documentation" 3 | about: An issue related to the GPT4All documentation 4 | labels: ["documentation"] 5 | --- 6 | 7 | ### Documentation 8 | 9 | 10 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature-request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: "\U0001F680 Feature Request" 3 | about: Submit a proposal/request for a new GPT4All feature 4 | title: "[Feature] Feature request title..." 5 | labels: ["enhancement"] 6 | --- 7 | 8 | ### Feature Request 9 | 10 | 11 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/other-bug.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: "\U0001F41B Other Bug Report" 3 | about: A bug in another component of GPT4All 4 | labels: ["bug-unconfirmed"] 5 | --- 6 | 7 | 8 | 9 | ### Bug Report 10 | 11 | 12 | 13 | ### Steps to Reproduce 14 | 15 | 16 | 17 | 1. 18 | 2. 19 | 3. 20 | 21 | ### Expected Behavior 22 | 23 | 24 | 25 | ### Your Environment 26 | 27 | - GPT4All version (if applicable): 28 | - Operating System: 29 | - Chat model used (if applicable): 30 | 31 | 32 | 33 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | ## Describe your changes 2 | 3 | ## Issue ticket number and link 4 | 5 | ## Checklist before requesting a review 6 | - [ ] I have performed a self-review of my code. 7 | - [ ] If it is a core feature, I have added thorough tests. 8 | - [ ] I have added thorough documentation for my code. 9 | - [ ] I have tagged the PR with relevant project labels. I acknowledge that a PR without labels may be dismissed. 10 | - [ ] If this PR addresses a bug, I have provided both a screenshot/video of the original bug and the working solution. 11 | 12 | ## Demo 13 | 14 | 15 | ### Steps to Reproduce 16 | 17 | 18 | ## Notes 19 | 20 | -------------------------------------------------------------------------------- /.github/workflows/close_issues.yml: -------------------------------------------------------------------------------- 1 | # This workflow will close issues that do not have labels or additional comments. 2 | # Trigger manually. 
3 | 4 | name: "Close Issues" 5 | on: 6 | workflow_dispatch: 7 | 8 | jobs: 9 | close_issues: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - name: Close issues without label or comment 13 | uses: actions/github-script@v3 14 | with: 15 | github-token: ${{secrets.GITHUB_TOKEN}} 16 | script: | 17 | const repo = context.repo; 18 | let page = 1; 19 | let issues = []; 20 | while (true) { 21 | const result = await github.issues.listForRepo({...repo, per_page: 100, page: page}); 22 | if (result.data.length === 0) break; 23 | issues = issues.concat(result.data); 24 | page += 1; 25 | } 26 | for (let { number } of issues) { 27 | const issueData = await github.issues.get({...repo, issue_number: number}); 28 | const comments = await github.issues.listComments({...repo, issue_number: number}); 29 | if (issueData.data.labels.length === 0 && comments.data.length < 1) { 30 | await github.issues.update({...repo, issue_number: number, state: 'closed'}); 31 | await github.issues.createComment({...repo, issue_number: number, body: 'Issue closed as it does not have any labels or comments.'}); 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /.github/workflows/codespell.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Codespell 3 | 4 | on: 5 | push: 6 | branches: [main] 7 | pull_request: 8 | branches: [main] 9 | 10 | jobs: 11 | codespell: 12 | name: Check for spelling errors 13 | runs-on: ubuntu-latest 14 | 15 | steps: 16 | - name: Checkout 17 | uses: actions/checkout@v4 18 | - name: Codespell 19 | uses: codespell-project/actions-codespell@v2 20 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "llama.cpp-mainline"] 2 | path = gpt4all-backend/deps/llama.cpp-mainline 3 | url = https://github.com/nomic-ai/llama.cpp.git 4 | branch = master 5 | [submodule "gpt4all-chat/usearch"] 6 | path = gpt4all-chat/deps/usearch 7 | url = https://github.com/nomic-ai/usearch.git 8 | [submodule "gpt4all-chat/deps/SingleApplication"] 9 | path = gpt4all-chat/deps/SingleApplication 10 | url = https://github.com/nomic-ai/SingleApplication.git 11 | [submodule "gpt4all-chat/deps/fmt"] 12 | path = gpt4all-chat/deps/fmt 13 | url = https://github.com/fmtlib/fmt.git 14 | [submodule "gpt4all-chat/deps/DuckX"] 15 | path = gpt4all-chat/deps/DuckX 16 | url = https://github.com/nomic-ai/DuckX.git 17 | [submodule "gpt4all-chat/deps/QXlsx"] 18 | path = gpt4all-chat/deps/QXlsx 19 | url = https://github.com/nomic-ai/QXlsx.git 20 | [submodule "gpt4all-chat/deps/minja"] 21 | path = gpt4all-chat/deps/minja 22 | url = https://github.com/nomic-ai/minja.git 23 | [submodule "gpt4all-chat/deps/json"] 24 | path = gpt4all-chat/deps/json 25 | url = https://github.com/nlohmann/json.git 26 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | Copyright (c) 2023 Nomic, Inc. 
2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in all 11 | copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | SOFTWARE. 20 | -------------------------------------------------------------------------------- /common/common.cmake: -------------------------------------------------------------------------------- 1 | function(gpt4all_add_warning_options target) 2 | if (MSVC) 3 | return() 4 | endif() 5 | target_compile_options("${target}" PRIVATE 6 | # base options 7 | -Wall 8 | -Wextra 9 | # extra options 10 | -Wcast-align 11 | -Wextra-semi 12 | -Wformat=2 13 | -Wmissing-include-dirs 14 | -Wsuggest-override 15 | -Wvla 16 | # errors 17 | -Werror=format-security 18 | -Werror=init-self 19 | -Werror=pointer-arith 20 | -Werror=undef 21 | # disabled warnings 22 | -Wno-sign-compare 23 | -Wno-unused-parameter 24 | ) 25 | if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") 26 | target_compile_options("${target}" PRIVATE 27 | -Wduplicated-branches 28 | -Wduplicated-cond 29 | -Wlogical-op 30 | -Wno-reorder 31 | -Wno-null-dereference 32 | ) 33 | elseif (CMAKE_CXX_COMPILER_ID MATCHES "^(Apple)?Clang$") 34 | target_compile_options("${target}" PRIVATE 35 | -Wunreachable-code-break 36 | -Wunreachable-code-return 37 | -Werror=pointer-integer-compare 38 | -Wno-reorder-ctor 39 | ) 40 | endif() 41 | endfunction() 42 | -------------------------------------------------------------------------------- /gpt4all-backend/include/gpt4all-backend/sysinfo.h: -------------------------------------------------------------------------------- 1 | #ifndef SYSINFO_H 2 | #define SYSINFO_H 3 | 4 | #include <fstream> 5 | #include <iomanip> 6 | #include <sstream> 7 | #include <string> 8 | 9 | #if defined(__linux__) 10 | # include <unistd.h> 11 | #elif defined(__APPLE__) 12 | # include <sys/types.h> 13 | # include <sys/sysctl.h> 14 | #elif defined(_WIN32) 15 | # define WIN32_LEAN_AND_MEAN 16 | # ifndef NOMINMAX 17 | # define NOMINMAX 18 | # endif 19 | # include <windows.h> 20 | #endif 21 | 22 | static long long getSystemTotalRAMInBytes() 23 | { 24 | long long totalRAM = 0; 25 | 26 | #if defined(__linux__) 27 | std::ifstream file("/proc/meminfo"); 28 | std::string line; 29 | while (std::getline(file, line)) { 30 | if (line.find("MemTotal") != std::string::npos) { 31 | std::string memTotalStr = line.substr(line.find(":") + 1); 32 | memTotalStr.erase(0, memTotalStr.find_first_not_of(" ")); 33 | memTotalStr = memTotalStr.substr(0, memTotalStr.find(" ")); 34 | totalRAM = std::stoll(memTotalStr) * 1024; // Convert from KB to bytes 35 | break; 36 | } 37 | } 38 | file.close(); 39 | #elif defined(__APPLE__) 40 | int mib[2] = {CTL_HW, HW_MEMSIZE}; 41 | size_t length = 
sizeof(totalRAM); 42 | sysctl(mib, 2, &totalRAM, &length, NULL, 0); 43 | #elif defined(_WIN32) 44 | MEMORYSTATUSEX memoryStatus; 45 | memoryStatus.dwLength = sizeof(memoryStatus); 46 | GlobalMemoryStatusEx(&memoryStatus); 47 | totalRAM = memoryStatus.ullTotalPhys; 48 | #endif 49 | 50 | return totalRAM; 51 | } 52 | 53 | static double getSystemTotalRAMInGB() 54 | { 55 | return static_cast<double>(getSystemTotalRAMInBytes()) / (1024 * 1024 * 1024); 56 | } 57 | 58 | static std::string getSystemTotalRAMInGBString() 59 | { 60 | std::stringstream ss; 61 | ss << std::fixed << std::setprecision(2) << getSystemTotalRAMInGB() << " GB"; 62 | return ss.str(); 63 | } 64 | 65 | #endif // SYSINFO_H 66 | -------------------------------------------------------------------------------- /gpt4all-backend/src/dlhandle.cpp: -------------------------------------------------------------------------------- 1 | #include "dlhandle.h" 2 | 3 | #include <string> 4 | 5 | #ifndef _WIN32 6 | # include <dlfcn.h> 7 | #else 8 | # include <cassert> 9 | # include <sstream> 10 | # define WIN32_LEAN_AND_MEAN 11 | # ifndef NOMINMAX 12 | # define NOMINMAX 13 | # endif 14 | # include <windows.h> 15 | #endif 16 | 17 | using namespace std::string_literals; 18 | namespace fs = std::filesystem; 19 | 20 | 21 | #ifndef _WIN32 22 | 23 | Dlhandle::Dlhandle(const fs::path &fpath) 24 | { 25 | chandle = dlopen(fpath.c_str(), RTLD_LAZY | RTLD_LOCAL); 26 | if (!chandle) { 27 | throw Exception("dlopen: "s + dlerror()); 28 | } 29 | } 30 | 31 | Dlhandle::~Dlhandle() 32 | { 33 | if (chandle) dlclose(chandle); 34 | } 35 | 36 | void *Dlhandle::get_internal(const char *symbol) const 37 | { 38 | return dlsym(chandle, symbol); 39 | } 40 | 41 | #else // defined(_WIN32) 42 | 43 | Dlhandle::Dlhandle(const fs::path &fpath) 44 | { 45 | fs::path afpath = fs::absolute(fpath); 46 | 47 | // Suppress the "Entry Point Not Found" dialog, caused by outdated nvcuda.dll from the GPU driver 48 | UINT lastErrorMode = GetErrorMode(); 49 | SetErrorMode(lastErrorMode | SEM_FAILCRITICALERRORS); 50 | 51 | chandle = LoadLibraryExW(afpath.c_str(), NULL, LOAD_LIBRARY_SEARCH_DEFAULT_DIRS | LOAD_LIBRARY_SEARCH_DLL_LOAD_DIR); 52 | 53 | SetErrorMode(lastErrorMode); 54 | 55 | if (!chandle) { 56 | DWORD err = GetLastError(); 57 | std::ostringstream ss; 58 | ss << "LoadLibraryExW failed with error 0x" << std::hex << err; 59 | throw Exception(ss.str()); 60 | } 61 | } 62 | 63 | Dlhandle::~Dlhandle() 64 | { 65 | if (chandle) FreeLibrary(HMODULE(chandle)); 66 | } 67 | 68 | void *Dlhandle::get_internal(const char *symbol) const 69 | { 70 | return GetProcAddress(HMODULE(chandle), symbol); 71 | } 72 | 73 | #endif // defined(_WIN32) 74 | -------------------------------------------------------------------------------- /gpt4all-backend/src/dlhandle.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include <filesystem> 4 | #include <stdexcept> 5 | #include <string> 6 | #include <utility> 7 | 8 | namespace fs = std::filesystem; 9 | 10 | 11 | class Dlhandle { 12 | void *chandle = nullptr; 13 | 14 | public: 15 | class Exception : public std::runtime_error { 16 | public: 17 | using std::runtime_error::runtime_error; 18 | }; 19 | 20 | Dlhandle() = default; 21 | Dlhandle(const fs::path &fpath); 22 | Dlhandle(const Dlhandle &o) = delete; 23 | Dlhandle(Dlhandle &&o) 24 | : chandle(o.chandle) 25 | { 26 | o.chandle = nullptr; 27 | } 28 | 29 | ~Dlhandle(); 30 | 31 | Dlhandle &operator=(Dlhandle &&o) { 32 | chandle = std::exchange(o.chandle, nullptr); 33 | return *this; 34 | } 35 | 36 | template <typename T> 37 | T *get(const std::string &symbol) const { 38 | 
return reinterpret_cast<T *>(get_internal(symbol.c_str())); 39 | } 40 | 41 | auto get_fnc(const std::string &symbol) const { 42 | return get<void *(...)>(symbol); 43 | } 44 | 45 | private: 46 | void *get_internal(const char *symbol) const; 47 | }; 48 | -------------------------------------------------------------------------------- /gpt4all-backend/src/utils.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include <cassert> 4 | 5 | #ifdef NDEBUG 6 | # ifdef __has_builtin 7 | # if __has_builtin(__builtin_unreachable) 8 | # define UNREACHABLE() __builtin_unreachable() 9 | # else 10 | # define UNREACHABLE() do {} while (0) 11 | # endif 12 | # else 13 | # define UNREACHABLE() do {} while (0) 14 | # endif 15 | #else 16 | # define UNREACHABLE() assert(!"Unreachable statement was reached") 17 | #endif 18 | -------------------------------------------------------------------------------- /gpt4all-bindings/README.md: -------------------------------------------------------------------------------- 1 | # GPT4All Language Bindings 2 | These are the language bindings for the GPT4All backend. They provide functionality to load GPT4All models (and other llama.cpp models), generate text, and (in the case of the Python bindings) embed text as a vector representation. 3 | 4 | See their respective folders for language-specific documentation; a minimal Python usage sketch follows below. 5 | 6 | ### Languages 7 | - [Python](https://github.com/nomic-ai/gpt4all/tree/main/gpt4all-bindings/python) (Nomic official, maintained by [@cebtenzzre](https://github.com/cebtenzzre)) 8 | - [Node.js/Typescript](https://github.com/nomic-ai/gpt4all/tree/main/gpt4all-bindings/typescript) (community, maintained by [@jacoobes](https://github.com/jacoobes) and [@iimez](https://github.com/iimez)) 9 | 10 | 
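As a quick orientation before the per-language docs, here is a minimal sketch of the Python bindings in use. It mirrors the examples in the Python documentation (`python/docs/index.md`) and the embedding test in `python/gpt4all/tests/`; the model file is downloaded automatically on first use:

```python
from gpt4all import Embed4All, GPT4All

# Load a chat model (downloads ~4.66 GB on first run) and generate text.
model = GPT4All("Meta-Llama-3-8B-Instruct.Q4_0.gguf")
with model.chat_session():
    print(model.generate("How can I run LLMs efficiently on my laptop?", max_tokens=1024))

# Embed text as a vector representation (Python bindings only).
embedder = Embed4All()
vector = embedder.embed("The quick brown fox jumps over the lazy dog")
print(len(vector))  # dimensionality of the embedding
```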
11 | <br/> 12 | <br/> 13 | 14 | <details><summary>Archived Bindings</summary> 15 | 16 | The following bindings have been removed from this repository due to lack of maintenance. If adopted, they can be brought back; feel free to message a developer on Discord if you are interested in maintaining one of them. Below are links to their last available version (not necessarily the last working version). 17 | - C#: [41c9013f](https://github.com/nomic-ai/gpt4all/tree/41c9013fa46a194b3e4fee6ced1b9d1b65e177ac/gpt4all-bindings/csharp) 18 | - Java: [41c9013f](https://github.com/nomic-ai/gpt4all/tree/41c9013fa46a194b3e4fee6ced1b9d1b65e177ac/gpt4all-bindings/java) 19 | - Go: [41c9013f](https://github.com/nomic-ai/gpt4all/tree/41c9013fa46a194b3e4fee6ced1b9d1b65e177ac/gpt4all-bindings/golang) 20 | 21 | </details>
22 | -------------------------------------------------------------------------------- /gpt4all-bindings/cli/README.md: -------------------------------------------------------------------------------- 1 | # GPT4All Command-Line Interface (CLI) 2 | 3 | GPT4All on the command-line. 4 | 5 | More details on the [wiki](https://github.com/nomic-ai/gpt4all/wiki/Python-CLI). 6 | 7 | ## Quickstart 8 | 9 | The CLI is based on the `gpt4all` Python bindings and the `typer` package. 10 | 11 | The following shows one way to get started with the CLI; the documentation has more information. 12 | Typically, you will want to replace `python` with `python3` on _Unix-like_ systems and `py -3` on 13 | _Windows_. Also, it's assumed you have all the necessary Python components already installed. 14 | 15 | The CLI is a self-contained Python script named [app.py] ([download][app.py-download]). As long as 16 | its package dependencies are present, you can download and run it from wherever you like. 17 | 18 | [app.py]: https://github.com/nomic-ai/gpt4all/blob/main/gpt4all-bindings/cli/app.py 19 | [app.py-download]: https://raw.githubusercontent.com/nomic-ai/gpt4all/main/gpt4all-bindings/cli/app.py 20 | 21 | ```shell 22 | # optional but recommended: create and use a virtual environment 23 | python -m venv gpt4all-cli 24 | ``` 25 | _Windows_ and _Unix-like_ systems differ slightly in how you activate a _virtual environment_: 26 | - _Unix-like_, typically: `. gpt4all-cli/bin/activate` 27 | - _Windows_: `gpt4all-cli\Scripts\activate` 28 | 29 | Then: 30 | ```shell 31 | # pip-install the necessary packages; omit '--user' if using a virtual environment 32 | python -m pip install --user --upgrade gpt4all typer 33 | # run the CLI 34 | python app.py repl 35 | ``` 36 | By default, it will automatically download the `Mistral Instruct` model to `.cache/gpt4all/` in your 37 | user directory, if necessary. 38 | 39 | If you have already saved a model beforehand, specify its path with the `-m`/`--model` argument, 40 | for example: 41 | ```shell 42 | python app.py repl --model /home/user/my-gpt4all-models/mistral-7b-instruct-v0.1.Q4_0.gguf 43 | ``` 44 | -------------------------------------------------------------------------------- /gpt4all-bindings/cli/developer_notes.md: -------------------------------------------------------------------------------- 1 | # Developing the CLI 2 | ## Documentation 3 | Documentation can be found in three places: 4 | - `app.py` docstrings & comments 5 | - a Readme: `gpt4all-bindings/cli/README.md` 6 | - the actual CLI documentation: `gpt4all-bindings/python/docs/gpt4all_cli.md` 7 | 8 | The _docstrings_ are meant for programmatic use. Since the CLI is primarily geared towards users 9 | rather than towards building on top of it, they're kept terse. 10 | 11 | The _Readme_ is mostly meant for users and includes: 12 | - a link to the _CLI documentation_ (on the [website]) 13 | - a Quickstart section with some guidance on how to get started with a sane setup 14 | 15 | The _CLI documentation_ and other documentation are located in the above-mentioned `docs/` folder. 16 | They're in Markdown format and built for the [website]. Of the three, they should be the most 17 | detailed. 18 | 19 | [website]: https://docs.gpt4all.io/gpt4all_cli.html 20 | 21 | 22 | ## Versioning 23 | The version number should now follow the `gpt4all` PyPI package, so compatibility is clearer. 24 | 25 | The one place to change it is the `namedtuple` called `VERSION_INFO`. 
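For orientation, the pattern is sketched below; the exact field names and current values live in `app.py`, so treat these as placeholders rather than the real definition:

```python
from collections import namedtuple

# Illustrative only -- mirror the real definition in app.py.
VersionInfo = namedtuple('VersionInfo', ['major', 'minor', 'micro'])
VERSION_INFO = VersionInfo(major=2, minor=0, micro=0)
VERSION = '.'.join(map(str, VERSION_INFO))  # e.g. "2.0.0"
```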
26 | -------------------------------------------------------------------------------- /gpt4all-bindings/python/.isort.cfg: -------------------------------------------------------------------------------- 1 | [settings] 2 | known_third_party=geopy,nltk,np,numpy,pandas,pysbd,fire,torch 3 | 4 | line_length=120 5 | include_trailing_comma=True 6 | multi_line_output=3 7 | use_parentheses=True -------------------------------------------------------------------------------- /gpt4all-bindings/python/LICENSE.txt: -------------------------------------------------------------------------------- 1 | Copyright (c) 2023 Nomic, Inc. 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in all 11 | copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | SOFTWARE. -------------------------------------------------------------------------------- /gpt4all-bindings/python/MANIFEST.in: -------------------------------------------------------------------------------- 1 | recursive-include gpt4all/llmodel_DO_NOT_MODIFY * -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/assets/add.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-bindings/python/docs/assets/add.png -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/assets/add_model_gpt4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-bindings/python/docs/assets/add_model_gpt4.png -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/assets/attach_spreadsheet.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-bindings/python/docs/assets/attach_spreadsheet.png -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/assets/baelor.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-bindings/python/docs/assets/baelor.png -------------------------------------------------------------------------------- 
/gpt4all-bindings/python/docs/assets/before_first_chat.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-bindings/python/docs/assets/before_first_chat.png -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/assets/chat_window.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-bindings/python/docs/assets/chat_window.png -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/assets/closed_chat_panel.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-bindings/python/docs/assets/closed_chat_panel.png -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/assets/configure_doc_collection.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-bindings/python/docs/assets/configure_doc_collection.png -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/assets/disney_spreadsheet.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-bindings/python/docs/assets/disney_spreadsheet.png -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/assets/download.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-bindings/python/docs/assets/download.png -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/assets/download_llama.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-bindings/python/docs/assets/download_llama.png -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/assets/explore.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-bindings/python/docs/assets/explore.png -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/assets/explore_models.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-bindings/python/docs/assets/explore_models.png -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/assets/favicon.ico: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-bindings/python/docs/assets/favicon.ico -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/assets/good_tyrion.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-bindings/python/docs/assets/good_tyrion.png -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/assets/got_docs_ready.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-bindings/python/docs/assets/got_docs_ready.png -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/assets/got_done.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-bindings/python/docs/assets/got_done.png -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/assets/gpt4all_home.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-bindings/python/docs/assets/gpt4all_home.png -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/assets/gpt4all_xlsx_attachment.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-bindings/python/docs/assets/gpt4all_xlsx_attachment.mp4 -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/assets/installed_models.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-bindings/python/docs/assets/installed_models.png -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/assets/linux.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-bindings/python/docs/assets/linux.png -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/assets/local_embed.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-bindings/python/docs/assets/local_embed.gif -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/assets/mac.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-bindings/python/docs/assets/mac.png -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/assets/models_page_icon.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-bindings/python/docs/assets/models_page_icon.png -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/assets/new_docs_annotated.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-bindings/python/docs/assets/new_docs_annotated.png -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/assets/new_docs_annotated_filled.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-bindings/python/docs/assets/new_docs_annotated_filled.png -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/assets/new_first_chat.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-bindings/python/docs/assets/new_first_chat.png -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/assets/no_docs.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-bindings/python/docs/assets/no_docs.png -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/assets/no_models.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-bindings/python/docs/assets/no_models.png -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/assets/no_models_tiny.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-bindings/python/docs/assets/no_models_tiny.png -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/assets/nomic.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-bindings/python/docs/assets/nomic.png -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/assets/obsidian_adding_collection.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-bindings/python/docs/assets/obsidian_adding_collection.png -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/assets/obsidian_docs.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-bindings/python/docs/assets/obsidian_docs.png 
-------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/assets/obsidian_response.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-bindings/python/docs/assets/obsidian_response.png -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/assets/obsidian_sources.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-bindings/python/docs/assets/obsidian_sources.png -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/assets/open_chat_panel.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-bindings/python/docs/assets/open_chat_panel.png -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/assets/open_local_docs.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-bindings/python/docs/assets/open_local_docs.png -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/assets/open_sources.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-bindings/python/docs/assets/open_sources.png -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/assets/osbsidian_user_interaction.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-bindings/python/docs/assets/osbsidian_user_interaction.png -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/assets/search_mistral.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-bindings/python/docs/assets/search_mistral.png -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/assets/search_settings.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-bindings/python/docs/assets/search_settings.png -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/assets/spreadsheet_chat.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-bindings/python/docs/assets/spreadsheet_chat.png -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/assets/syrio_snippets.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-bindings/python/docs/assets/syrio_snippets.png -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/assets/three_model_options.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-bindings/python/docs/assets/three_model_options.png -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/assets/ubuntu.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/assets/windows.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-bindings/python/docs/assets/windows.png -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/css/custom.css: -------------------------------------------------------------------------------- 1 | .md-content h1, 2 | .md-content h2 { 3 | margin-top: 0.5em; 4 | margin-bottom: 0.5em; 5 | } 6 | -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/gpt4all_desktop/localdocs.md: -------------------------------------------------------------------------------- 1 | # LocalDocs 2 | 3 | LocalDocs brings the information you have from files on-device into your LLM chats - **privately**. 4 | 5 | ## Create LocalDocs 6 | 7 | !!! note "Create LocalDocs" 8 | 9 | 1. Click `+ Add Collection`. 10 | 11 | 2. Name your collection and link it to a folder. 12 | 13 | 14 | 15 | 18 | 21 | 22 |
16 | <table> 17 | <tr> 18 | <td><img src="../assets/new_docs_annotated.png" alt="new GOT Docs"></td> 19 | <td><img src="../assets/new_docs_annotated_filled.png" alt="new GOT Docs filled out"></td> 20 | </tr> 21 | </table> 22 | 23 | 24 | 3. Click `Create Collection`. Progress for the collection is displayed on the LocalDocs page. 25 | 26 | ![Embedding in progress](../assets/baelor.png) 27 | 28 | You will see a green `Ready` indicator when the entire collection is ready. 29 | 30 | Note: you can still chat with the files that are ready before the entire collection is ready. 31 | 32 | ![Embedding complete](../assets/got_done.png) 33 | 34 | Later on, if you modify your LocalDocs settings, you can rebuild your collections with your new settings. 35 | 36 | 4. In your chats, open `LocalDocs` with the button in the top-right corner to give your LLM context from those files. 37 | 38 | ![LocalDocs result](../assets/syrio_snippets.png) 39 | 40 | 5. See which files were referenced by clicking `Sources` below the LLM responses. 41 | 42 | ![Sources](../assets/open_sources.png) 43 | 44 | ## How It Works 45 | 46 | A LocalDocs collection uses Nomic AI's free and fast on-device embedding models to index your folder into text snippets that each get an **embedding vector**. These vectors allow us to find snippets from your files that are semantically similar to the questions and prompts you enter in your chats. We then include those semantically similar snippets in the prompt to the LLM. 47 | 48 | To try the embedding models yourself, we recommend using the [Nomic Python SDK](https://docs.nomic.ai/atlas/capabilities/embeddings). 49 | 
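To make the retrieval idea concrete, here is a toy sketch using `Embed4All` from the `gpt4all` Python package. It illustrates the principle of ranking snippets by semantic similarity; it is not the application's actual indexing code:

```python
from gpt4all import Embed4All

embedder = Embed4All()  # free, fast, on-device embedding model

snippets = [
    "Winter is coming to the North.",
    "The quarterly budget increased by 12%.",
]
query = "What season is arriving?"

def cosine(a, b):
    # Cosine similarity between two embedding vectors.
    dot = sum(x * y for x, y in zip(a, b))
    norm_a = sum(x * x for x in a) ** 0.5
    norm_b = sum(x * x for x in b) ** 0.5
    return dot / (norm_a * norm_b)

query_vec = embedder.embed(query)
# Rank snippets by similarity to the query; the top snippets are the kind
# of context LocalDocs includes in the prompt to the LLM.
ranked = sorted(snippets, key=lambda s: cosine(embedder.embed(s), query_vec), reverse=True)
print(ranked[0])  # -> "Winter is coming to the North."
```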
-------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/gpt4all_desktop/quickstart.md: -------------------------------------------------------------------------------- 1 | # GPT4All Desktop 2 | 3 | The GPT4All Desktop Application allows you to download and run large language models (LLMs) locally & privately on your device. 4 | 5 | With GPT4All, you can chat with models, turn your local files into information sources for models [(LocalDocs)](localdocs.md), or browse models available online to download onto your device. 6 | 7 | [Official Video Tutorial](https://www.youtube.com/watch?v=gQcZDXRVJok) 8 | 9 | ## Quickstart 10 | 11 | !!! note "Quickstart" 12 | 13 | 1. Install GPT4All for your operating system and open the application. 14 | 15 | 
16 | [Download for Windows](https://gpt4all.io/installers/gpt4all-installer-win64.exe)      17 | [Download for Mac](https://gpt4all.io/installers/gpt4all-installer-darwin.dmg)      18 | [Download for Linux](https://gpt4all.io/installers/gpt4all-installer-linux.run) 19 |
20 | 21 | 2. Hit `Start Chatting`. ![GPT4All home page](../assets/gpt4all_home.png) 22 | 23 | 3. Click `+ Add Model`. 24 | 25 | 4. Download a model. We recommend starting with Llama 3, but you can [browse more models](models.md). ![Download a model](../assets/download_llama.png) 26 | 27 | 5. Once downloaded, go to Chats (below Home and above Models in the menu on the left). 28 | 29 | 6. Click "Load Default Model" (this will be Llama 3 or whichever model you downloaded). 30 | 31 | 32 | <table> 33 | <tr> 34 | <td><img src="../assets/before_first_chat.png" alt="Before first chat"></td> 35 | <td><img src="../assets/new_first_chat.png" alt="New first chat"></td> 36 | </tr> 37 | </table> 38 | 39 | 40 | 
41 | 42 | 7. Try the [example chats](chats.md) or your own prompts! 43 | -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/gpt4all_help/faq.md: -------------------------------------------------------------------------------- 1 | # Frequently Asked Questions 2 | 3 | ## Models 4 | 5 | ### Which language models are supported? 6 | 7 | We support models with a `llama.cpp` implementation which have been uploaded to [HuggingFace](https://huggingface.co/). 8 | 9 | ### Which embedding models are supported? 10 | 11 | We support SBert and Nomic Embed Text v1 & v1.5. 12 | 13 | ## Software 14 | 15 | ### What software do I need? 16 | 17 | All you need is to [install GPT4All](../index.md) onto your Windows, Mac, or Linux computer. 18 | 19 | ### Which SDK languages are supported? 20 | 21 | Our SDK is in Python for usability, but these are light bindings around [`llama.cpp`](https://github.com/ggerganov/llama.cpp) implementations that we contribute to for efficiency and accessibility on everyday computers. 22 | 23 | ### Is there an API? 24 | 25 | Yes, you can run your model in server-mode with our [OpenAI-compatible API](https://platform.openai.com/docs/api-reference/completions), which you can configure in [settings](../gpt4all_desktop/settings.md#application-settings).
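For example, once the local API server is enabled in the application settings (it serves an OpenAI-compatible endpoint at `http://localhost:4891/v1` by default), a request can be made with any HTTP client. The sketch below uses only the Python standard library, and the model name is a placeholder that must match a model you have downloaded:

```python
import json
import urllib.request

# Assumes the GPT4All desktop app is running with its local API server
# enabled, and that the named model is already downloaded.
payload = {
    "model": "Llama 3 8B Instruct",
    "messages": [{"role": "user", "content": "Who is Lionel Messi?"}],
    "max_tokens": 50,
    "temperature": 0.28,
}
req = urllib.request.Request(
    "http://localhost:4891/v1/chat/completions",
    data=json.dumps(payload).encode(),
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req) as resp:
    print(json.load(resp)["choices"][0]["message"]["content"])
```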
26 | 27 | ### Can I monitor a GPT4All deployment? 28 | 29 | Yes, GPT4All [integrates](../gpt4all_python/monitoring.md) with [OpenLIT](https://github.com/openlit/openlit) so you can deploy LLMs with user interactions and hardware usage automatically monitored for full observability. 30 | 31 | ### Is there a command line interface (CLI)? 32 | 33 | [Yes](https://github.com/nomic-ai/gpt4all/tree/main/gpt4all-bindings/cli), we provide a lightweight CLI built on top of the Python client. We welcome further contributions! 34 | 35 | ## Hardware 36 | 37 | ### What hardware do I need? 38 | 39 | GPT4All can run on CPU, Metal (Apple Silicon M1+), and GPU. 40 | 41 | ### What are the system requirements? 42 | 43 | Your CPU needs to support [AVX or AVX2 instructions](https://en.wikipedia.org/wiki/Advanced_Vector_Extensions) and you need enough RAM to load a model into memory. 44 | -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/gpt4all_help/troubleshooting.md: -------------------------------------------------------------------------------- 1 | # Troubleshooting 2 | 3 | ## Error Loading Models 4 | 5 | It is possible you are trying to load a model from HuggingFace whose weights are not compatible with our [backend](https://github.com/nomic-ai/gpt4all/tree/main/gpt4all-bindings). 6 | 7 | Try downloading one of the officially supported models listed on the main models page in the application. If the problem persists, please share your experience on our [Discord](https://discord.com/channels/1076964370942267462). 8 | 9 | ## Bad Responses 10 | 11 | Try the [example chats](../gpt4all_desktop/chats.md) to double-check that your system is running models correctly. 12 | 13 | ### Responses Incoherent 14 | 15 | If you are seeing something **not at all** resembling the [example chats](../gpt4all_desktop/chats.md) - for example, if the responses you are seeing look nonsensical - try [downloading a different model](../gpt4all_desktop/models.md), and please share your experience on our [Discord](https://discord.com/channels/1076964370942267462). 16 | 17 | ### Responses Incorrect 18 | 19 | LLMs can be unreliable. It's helpful to know what their training data was - they are less likely to be correct when asked about data they were not trained on, unless you give them the necessary information in the prompt as **context**. 20 | 21 | Giving LLMs additional context, like chatting using [LocalDocs](../gpt4all_desktop/localdocs.md), can help combine the language model's ability to understand text with the files that you trust to contain the information you need. 22 | 23 | Including information in a prompt is not a guarantee that it will be used correctly, but the clearer and more concise your prompts, and the more relevant they are to your files, the better. 24 | 25 | ### LocalDocs Issues 26 | 27 | Occasionally a model - particularly a smaller or generally weaker LLM - may not use the relevant text snippets from the files that were referenced via LocalDocs. If you are seeing this, it can help to use phrases like "in the docs" or "from the provided files" when prompting your model. 28 | -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/gpt4all_python/ref.md: -------------------------------------------------------------------------------- 1 | # GPT4All Python SDK Reference 2 | ::: gpt4all.gpt4all.GPT4All 3 | 4 | ::: gpt4all.gpt4all.Embed4All -------------------------------------------------------------------------------- /gpt4all-bindings/python/docs/index.md: -------------------------------------------------------------------------------- 1 | # GPT4All Documentation 2 | 3 | GPT4All runs large language models (LLMs) privately on everyday desktops & laptops. 4 | 5 | No API calls or GPUs required - you can just download the application and [get started](gpt4all_desktop/quickstart.md#quickstart). 6 | 7 | !!! note "Desktop Application" 8 | GPT4All runs LLMs as an application on your computer. Nomic's embedding models can bring information from your local documents and files into your chats. It's fast, on-device, and completely **private**. 9 | 10 |
11 | [Download for Windows](https://gpt4all.io/installers/gpt4all-installer-win64.exe)      12 | [Download for Mac](https://gpt4all.io/installers/gpt4all-installer-darwin.dmg)      13 | [Download for Linux](https://gpt4all.io/installers/gpt4all-installer-linux.run) 14 |
15 | 16 | !!! note "Python SDK" 17 | Use GPT4All in Python to program with LLMs implemented with the [`llama.cpp`](https://github.com/ggerganov/llama.cpp) backend and [Nomic's C backend](https://github.com/nomic-ai/gpt4all/tree/main/gpt4all-backend). Nomic contributes to open source software like [`llama.cpp`](https://github.com/ggerganov/llama.cpp) to make LLMs accessible and efficient **for all**. 18 | 19 | ```bash 20 | pip install gpt4all 21 | ``` 22 | 23 | ```python 24 | from gpt4all import GPT4All 25 | model = GPT4All("Meta-Llama-3-8B-Instruct.Q4_0.gguf") # downloads / loads a 4.66GB LLM 26 | with model.chat_session(): 27 | print(model.generate("How can I run LLMs efficiently on my laptop?", max_tokens=1024)) 28 | ``` 29 | -------------------------------------------------------------------------------- /gpt4all-bindings/python/gpt4all/__init__.py: -------------------------------------------------------------------------------- 1 | from .gpt4all import CancellationError as CancellationError, Embed4All as Embed4All, GPT4All as GPT4All 2 | -------------------------------------------------------------------------------- /gpt4all-bindings/python/gpt4all/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-bindings/python/gpt4all/tests/__init__.py -------------------------------------------------------------------------------- /gpt4all-bindings/python/gpt4all/tests/test_embed_timings.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import sys 3 | import time 4 | from io import StringIO 5 | 6 | from gpt4all import Embed4All, GPT4All 7 | 8 | 9 | def time_embedding(i, embedder): 10 | text = 'foo bar ' * i 11 | start_time = time.time() 12 | output = embedder.embed(text) 13 | end_time = time.time() 14 | elapsed_time = end_time - start_time 15 | print(f"Time report: {2 * i / elapsed_time} tokens/second with {2 * i} tokens taking {elapsed_time} seconds") 16 | 17 | 18 | if __name__ == "__main__": 19 | embedder = Embed4All(n_threads=8) 20 | for i in [2**n for n in range(6, 14)]: 21 | time_embedding(i, embedder) 22 | -------------------------------------------------------------------------------- /gpt4all-bindings/python/makefile: -------------------------------------------------------------------------------- 1 | SHELL:=/bin/bash -o pipefail 2 | ROOT_DIR:=$(shell dirname $(realpath $(lastword $(MAKEFILE_LIST)))) 3 | PYTHON:=python3 4 | 5 | env: 6 | if [ ! -d $(ROOT_DIR)/env ]; then $(PYTHON) -m venv $(ROOT_DIR)/env; fi 7 | 8 | dev: env 9 | source env/bin/activate; pip install black isort pytest; pip install -e . 10 | 11 | documentation: 12 | rm -rf ./site && mkdocs build 13 | 14 | wheel: 15 | rm -rf dist/ build/ gpt4all/llmodel_DO_NOT_MODIFY; python setup.py bdist_wheel; 16 | 17 | clean: 18 | rm -rf {.pytest_cache,env,gpt4all.egg-info} 19 | find . 
| grep -E "(__pycache__|\.pyc$|\.pyo$)" | xargs rm -rf 20 | 21 | black: 22 | source env/bin/activate; black -l 120 -S --target-version py36 gpt4all 23 | 24 | isort: 25 | source env/bin/activate; isort --ignore-whitespace --atomic -w 120 gpt4all 26 | 27 | test: 28 | source env/bin/activate; pytest -s gpt4all/tests -k "not test_inference_long" 29 | 30 | test_all: 31 | source env/bin/activate; pytest -s gpt4all/tests 32 | -------------------------------------------------------------------------------- /gpt4all-bindings/typescript/.clang-format: -------------------------------------------------------------------------------- 1 | --- 2 | Language: Cpp 3 | BasedOnStyle: Microsoft 4 | ColumnLimit: 120 -------------------------------------------------------------------------------- /gpt4all-bindings/typescript/.gitignore: -------------------------------------------------------------------------------- 1 | node_modules/ 2 | build/ 3 | prebuilds/ 4 | .yarn/* 5 | !.yarn/patches 6 | !.yarn/plugins 7 | !.yarn/releases 8 | !.yarn/sdks 9 | !.yarn/versions 10 | runtimes/ 11 | compile_flags.txt 12 | -------------------------------------------------------------------------------- /gpt4all-bindings/typescript/.npmignore: -------------------------------------------------------------------------------- 1 | test/ 2 | spec/ 3 | scripts/ 4 | build -------------------------------------------------------------------------------- /gpt4all-bindings/typescript/.yarnrc.yml: -------------------------------------------------------------------------------- 1 | nodeLinker: node-modules 2 | -------------------------------------------------------------------------------- /gpt4all-bindings/typescript/binding.ci.gyp: -------------------------------------------------------------------------------- 1 | { 2 | "targets": [ 3 | { 4 | "target_name": "gpt4all", # gpt4all-ts will cause compile error 5 | "include_dirs": [ 6 | "= 18.x.x" 40 | }, 41 | "prettier": { 42 | "endOfLine": "lf", 43 | "tabWidth": 4 44 | }, 45 | "jest": { 46 | "verbose": true 47 | }, 48 | "publishConfig": { 49 | "registry": "https://registry.npmjs.org/", 50 | "access": "public", 51 | "tag": "latest" 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /gpt4all-bindings/typescript/prompt.h: -------------------------------------------------------------------------------- 1 | #ifndef PREDICT_WORKER_H 2 | #define PREDICT_WORKER_H 3 | 4 | #include "llmodel.h" 5 | #include "llmodel_c.h" 6 | #include "napi.h" 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | 13 | struct ResponseCallbackData 14 | { 15 | int32_t tokenId; 16 | std::string token; 17 | }; 18 | 19 | struct PromptCallbackData 20 | { 21 | int32_t tokenId; 22 | }; 23 | 24 | struct LLModelWrapper 25 | { 26 | LLModel *llModel = nullptr; 27 | LLModel::PromptContext promptContext; 28 | ~LLModelWrapper() 29 | { 30 | delete llModel; 31 | } 32 | }; 33 | 34 | struct PromptWorkerConfig 35 | { 36 | Napi::Function responseCallback; 37 | bool hasResponseCallback = false; 38 | Napi::Function promptCallback; 39 | bool hasPromptCallback = false; 40 | llmodel_model model; 41 | std::mutex *mutex; 42 | std::string prompt; 43 | std::string promptTemplate; 44 | llmodel_prompt_context context; 45 | std::string result; 46 | bool special = false; 47 | std::string *fakeReply = nullptr; 48 | }; 49 | 50 | class PromptWorker : public Napi::AsyncWorker 51 | { 52 | public: 53 | PromptWorker(Napi::Env env, PromptWorkerConfig config); 54 | ~PromptWorker(); 55 | void 
Execute() override; 56 | void OnOK() override; 57 | void OnError(const Napi::Error &e) override; 58 | Napi::Promise GetPromise(); 59 | 60 | bool ResponseCallback(int32_t token_id, const std::string token); 61 | bool RecalculateCallback(bool isrecalculating); 62 | bool PromptCallback(int32_t token_id); 63 | 64 | private: 65 | Napi::Promise::Deferred promise; 66 | std::string result; 67 | PromptWorkerConfig _config; 68 | Napi::ThreadSafeFunction _responseCallbackFn; 69 | Napi::ThreadSafeFunction _promptCallbackFn; 70 | }; 71 | 72 | #endif // PREDICT_WORKER_H 73 | -------------------------------------------------------------------------------- /gpt4all-bindings/typescript/scripts/build.js: -------------------------------------------------------------------------------- 1 | const { spawn } = require("node:child_process"); 2 | const { resolve } = require("path"); 3 | const args = process.argv.slice(2); 4 | const platform = process.platform; 5 | //windows 64bit or 32 6 | if (platform === "win32") { 7 | const path = "scripts/build_msvc.bat"; 8 | spawn(resolve(path), ["/Y", ...args], { shell: true, stdio: "inherit" }); 9 | process.on("data", (s) => console.log(s.toString())); 10 | } else if (platform === "linux" || platform === "darwin") { 11 | const path = "scripts/build_unix.sh"; 12 | spawn(`sh `, [path, args], { 13 | shell: true, 14 | stdio: "inherit", 15 | }); 16 | process.on("data", (s) => console.log(s.toString())); 17 | } 18 | -------------------------------------------------------------------------------- /gpt4all-bindings/typescript/scripts/build_mingw.ps1: -------------------------------------------------------------------------------- 1 | $ROOT_DIR = '.\runtimes\win-x64' 2 | $BUILD_DIR = '.\runtimes\win-x64\build\mingw' 3 | $LIBS_DIR = '.\runtimes\win-x64\native' 4 | 5 | # cleanup env 6 | Remove-Item -Force -Recurse $ROOT_DIR -ErrorAction SilentlyContinue | Out-Null 7 | mkdir $BUILD_DIR | Out-Null 8 | mkdir $LIBS_DIR | Out-Null 9 | 10 | # build 11 | cmake -G "MinGW Makefiles" -S ..\..\gpt4all-backend -B $BUILD_DIR -DLLAMA_AVX2=ON 12 | cmake --build $BUILD_DIR --parallel --config Release 13 | 14 | # copy native dlls 15 | # cp "C:\ProgramData\mingw64\mingw64\bin\*dll" $LIBS_DIR 16 | cp "$BUILD_DIR\bin\*.dll" $LIBS_DIR 17 | -------------------------------------------------------------------------------- /gpt4all-bindings/typescript/scripts/build_msvc.bat: -------------------------------------------------------------------------------- 1 | @echo off 2 | 3 | set "BUILD_TYPE=Release" 4 | set "BUILD_DIR=.\build\win-x64-msvc" 5 | set "LIBS_DIR=.\runtimes\win32-x64" 6 | 7 | REM Cleanup env 8 | rmdir /s /q %BUILD_DIR% 9 | 10 | REM Create directories 11 | mkdir %BUILD_DIR% 12 | mkdir %LIBS_DIR% 13 | 14 | REM Build 15 | cmake -DBUILD_SHARED_LIBS=ON -DCMAKE_BUILD_TYPE=%BUILD_TYPE% -S ..\..\gpt4all-backend -B %BUILD_DIR% -A x64 16 | 17 | :BUILD 18 | REM Build the project 19 | cmake --build "%BUILD_DIR%" --parallel --config %BUILD_TYPE% 20 | 21 | REM Check the exit code of the build command 22 | if %errorlevel% neq 0 ( 23 | echo Build failed. Retrying... 24 | goto BUILD 25 | ) 26 | 27 | mkdir runtimes\win32-x64 28 | 29 | REM Copy the DLLs to the desired location 30 | del /F /A /Q %LIBS_DIR% 31 | xcopy /Y "%BUILD_DIR%\bin\%BUILD_TYPE%\*.dll" runtimes\win32-x64\native\ 32 | 33 | echo Batch script execution completed. 
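REM Note: the unconditional "goto BUILD" above re-runs the build whenever cmake exits
REM non-zero. Transient MSVC file-lock errors recover after a retry, but a genuine
REM compile error will loop indefinitely, since the retry count is unbounded.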
34 | -------------------------------------------------------------------------------- /gpt4all-bindings/typescript/scripts/build_unix.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | SYSNAME=$(uname -s) 4 | 5 | if [ "$SYSNAME" = "Linux" ]; then 6 | BASE_DIR="runtimes/linux-x64" 7 | LIB_EXT="so" 8 | elif [ "$SYSNAME" = "Darwin" ]; then 9 | BASE_DIR="runtimes/osx" 10 | LIB_EXT="dylib" 11 | elif [ -n "$SYSNAME" ]; then 12 | echo "Unsupported system: $SYSNAME" >&2 13 | exit 1 14 | else 15 | echo "\"uname -s\" failed" >&2 16 | exit 1 17 | fi 18 | 19 | NATIVE_DIR="$BASE_DIR/native" 20 | BUILD_DIR="$BASE_DIR/build" 21 | 22 | rm -rf "$BASE_DIR" 23 | mkdir -p "$NATIVE_DIR" "$BUILD_DIR" 24 | 25 | cmake -S ../../gpt4all-backend -B "$BUILD_DIR" && 26 | cmake --build "$BUILD_DIR" -j --config Release && { 27 | cp "$BUILD_DIR"/libgptj*.$LIB_EXT "$NATIVE_DIR"/ 28 | cp "$BUILD_DIR"/libllama*.$LIB_EXT "$NATIVE_DIR"/ 29 | } 30 | -------------------------------------------------------------------------------- /gpt4all-bindings/typescript/scripts/docs.js: -------------------------------------------------------------------------------- 1 | //Maybe some command line piping would work better, but can't think of platform independent command line tool 2 | 3 | const fs = require('fs'); 4 | 5 | const newPath = '../python/docs/gpt4all_nodejs.md'; 6 | const filepath = './README.md'; 7 | const intro = fs.readFileSync(filepath); 8 | 9 | fs.writeFileSync( 10 | newPath, intro 11 | ); 12 | 13 | -------------------------------------------------------------------------------- /gpt4all-bindings/typescript/scripts/mkclangd.js: -------------------------------------------------------------------------------- 1 | /// makes compile_flags.txt for clangd server support with this project 2 | /// run this with typescript as your cwd 3 | // 4 | //for debian users make sure to install libstdc++-12-dev 5 | 6 | const nodeaddonapi=require('node-addon-api').include; 7 | 8 | const fsp = require('fs/promises'); 9 | const { existsSync, readFileSync } = require('fs'); 10 | const assert = require('node:assert'); 11 | const findnodeapih = () => { 12 | assert(existsSync("./build"), "Haven't built the application once yet. 
run node scripts/prebuild.js"); 13 | const dir = readFileSync("./build/config.gypi", 'utf8'); 14 | const nodedir_line = dir.match(/"nodedir": "([^"]+)"/); 15 | assert(nodedir_line, "Found no matches") 16 | assert(nodedir_line[1]); 17 | console.log("node_api.h found at: ", nodedir_line[1]); 18 | return nodedir_line[1]+"/include/node"; 19 | }; 20 | 21 | const knownIncludes = [ 22 | '-I', 23 | './', 24 | '-I', 25 | nodeaddonapi.substring(1, nodeaddonapi.length-1), 26 | '-I', 27 | '../../gpt4all-backend', 28 | '-I', 29 | findnodeapih() 30 | ]; 31 | const knownFlags = [ 32 | "-x", 33 | "c++", 34 | '-std=c++17' 35 | ]; 36 | 37 | 38 | const output = knownFlags.join('\n')+'\n'+knownIncludes.join('\n'); 39 | 40 | fsp.writeFile('./compile_flags.txt', output, 'utf8') 41 | .then(() => console.log('done')) 42 | .catch(() => console.error('failed')); 43 | 44 | -------------------------------------------------------------------------------- /gpt4all-bindings/typescript/scripts/prebuild.js: -------------------------------------------------------------------------------- 1 | const prebuildify = require("prebuildify"); 2 | 3 | async function createPrebuilds(combinations) { 4 | for (const { platform, arch } of combinations) { 5 | const opts = { 6 | platform, 7 | arch, 8 | napi: true, 9 | targets: ["18.16.0"] 10 | }; 11 | try { 12 | await createPrebuild(opts); 13 | console.log( 14 | `Build succeeded for platform ${opts.platform} and architecture ${opts.arch}` 15 | ); 16 | } catch (err) { 17 | console.error( 18 | `Error building for platform ${opts.platform} and architecture ${opts.arch}:`, 19 | err 20 | ); 21 | } 22 | } 23 | } 24 | 25 | function createPrebuild(opts) { 26 | return new Promise((resolve, reject) => { 27 | prebuildify(opts, (err) => { 28 | if (err) { 29 | reject(err); 30 | } else { 31 | resolve(); 32 | } 33 | }); 34 | }); 35 | } 36 | 37 | let prebuildConfigs; 38 | if(process.platform === 'win32') { 39 | prebuildConfigs = [ 40 | { platform: "win32", arch: "x64" } 41 | ]; 42 | } else if(process.platform === 'linux') { 43 | //Unsure if darwin works, need mac tester! 
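// Background: prebuildify compiles the native addon once per platform/arch pair
// configured below and stores the binaries under prebuilds/, so consumers of the
// npm package can skip a local node-gyp compile at install time. Because napi: true
// builds against the stable Node-API ABI, a single binary per platform covers all
// Node versions from the release listed in "targets" onward.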
44 | prebuildConfigs = [ 45 | { platform: "linux", arch: "x64" }, 46 | //{ platform: "linux", arch: "arm64" }, 47 | //{ platform: "linux", arch: "armv7" }, 48 | ] 49 | } else if(process.platform === 'darwin') { 50 | prebuildConfigs = [ 51 | { platform: "darwin", arch: "x64" }, 52 | { platform: "darwin", arch: "arm64" }, 53 | ] 54 | } 55 | 56 | createPrebuilds(prebuildConfigs) 57 | .then(() => console.log("All builds succeeded")) 58 | .catch((err) => console.error("Error building:", err)); 59 | -------------------------------------------------------------------------------- /gpt4all-bindings/typescript/spec/callbacks.mjs: -------------------------------------------------------------------------------- 1 | import { promises as fs } from "node:fs"; 2 | import { loadModel, createCompletion } from "../src/gpt4all.js"; 3 | 4 | const model = await loadModel("Nous-Hermes-2-Mistral-7B-DPO.Q4_0.gguf", { 5 | verbose: true, 6 | device: "gpu", 7 | }); 8 | 9 | const res = await createCompletion( 10 | model, 11 | "I've got three 🍣 - What shall I name them?", 12 | { 13 | onPromptToken: (tokenId) => { 14 | console.debug("onPromptToken", { tokenId }); 15 | // throwing an error will cancel 16 | throw new Error("This is an error"); 17 | // const foo = thisMethodDoesNotExist(); 18 | // returning false will cancel as well 19 | // return false; 20 | }, 21 | onResponseToken: (tokenId, token) => { 22 | console.debug("onResponseToken", { tokenId, token }); 23 | // same applies here 24 | }, 25 | } 26 | ); 27 | 28 | console.debug("Output:", { 29 | usage: res.usage, 30 | message: res.choices[0].message, 31 | }); 32 | -------------------------------------------------------------------------------- /gpt4all-bindings/typescript/spec/chat-memory.mjs: -------------------------------------------------------------------------------- 1 | import { loadModel, createCompletion } from "../src/gpt4all.js"; 2 | 3 | const model = await loadModel("Nous-Hermes-2-Mistral-7B-DPO.Q4_0.gguf", { 4 | verbose: true, 5 | device: "gpu", 6 | }); 7 | 8 | const chat = await model.createChatSession({ 9 | messages: [ 10 | { 11 | role: "user", 12 | content: "I'll tell you a secret password: It's 63445.", 13 | }, 14 | { 15 | role: "assistant", 16 | content: "I will do my best to remember that.", 17 | }, 18 | { 19 | role: "user", 20 | content: 21 | "And here another fun fact: Bananas may be bluer than bread at night.", 22 | }, 23 | { 24 | role: "assistant", 25 | content: "Yes, that makes sense.", 26 | }, 27 | ], 28 | }); 29 | 30 | const turn1 = await createCompletion( 31 | chat, 32 | "Please tell me the secret password." 33 | ); 34 | console.debug(turn1.choices[0].message); 35 | // "The secret password you shared earlier is 63445."" 36 | 37 | const turn2 = await createCompletion( 38 | chat, 39 | "Thanks! Have your heard about the bananas?" 40 | ); 41 | console.debug(turn2.choices[0].message); 42 | 43 | for (let i = 0; i < 32; i++) { 44 | // gpu go brr 45 | const turn = await createCompletion( 46 | chat, 47 | i % 2 === 0 ? "Tell me a fun fact." : "And a boring one?" 48 | ); 49 | console.debug({ 50 | message: turn.choices[0].message, 51 | n_past_tokens: turn.usage.n_past_tokens, 52 | }); 53 | } 54 | 55 | const finalTurn = await createCompletion( 56 | chat, 57 | "Now I forgot the secret password. Can you remind me?" 58 | ); 59 | console.debug(finalTurn.choices[0].message); 60 | 61 | // result of finalTurn may vary depending on whether the generated facts pushed the secret out of the context window. 62 | // "Of course! 
The secret password you shared earlier is 63445." 63 | // "I apologize for any confusion. As an AI language model, ..." 64 | 65 | model.dispose(); 66 | -------------------------------------------------------------------------------- /gpt4all-bindings/typescript/spec/chat-minimal.mjs: -------------------------------------------------------------------------------- 1 | import { loadModel, createCompletion } from "../src/gpt4all.js"; 2 | 3 | const model = await loadModel("orca-mini-3b-gguf2-q4_0.gguf", { 4 | verbose: true, 5 | device: "gpu", 6 | }); 7 | 8 | const chat = await model.createChatSession(); 9 | 10 | await createCompletion( 11 | chat, 12 | "Why are bananas rather blue than bread at night sometimes?", 13 | { 14 | verbose: true, 15 | } 16 | ); 17 | await createCompletion(chat, "Are you sure?", { 18 | verbose: true, 19 | }); 20 | -------------------------------------------------------------------------------- /gpt4all-bindings/typescript/spec/concurrency.mjs: -------------------------------------------------------------------------------- 1 | import { 2 | loadModel, 3 | createCompletion, 4 | } from "../src/gpt4all.js"; 5 | 6 | const modelOptions = { 7 | verbose: true, 8 | }; 9 | 10 | const model1 = await loadModel("orca-mini-3b-gguf2-q4_0.gguf", { 11 | ...modelOptions, 12 | device: "gpu", // only one model can be on gpu 13 | }); 14 | const model2 = await loadModel("orca-mini-3b-gguf2-q4_0.gguf", modelOptions); 15 | const model3 = await loadModel("orca-mini-3b-gguf2-q4_0.gguf", modelOptions); 16 | 17 | const promptContext = { 18 | verbose: true, 19 | } 20 | 21 | const responses = await Promise.all([ 22 | createCompletion(model1, "What is 1 + 1?", promptContext), 23 | // generating with the same model instance will wait for the previous completion to finish 24 | createCompletion(model1, "What is 1 + 1?", promptContext), 25 | // generating with different model instances will run in parallel 26 | createCompletion(model2, "What is 1 + 2?", promptContext), 27 | createCompletion(model3, "What is 1 + 3?", promptContext), 28 | ]); 29 | console.log(responses.map((res) => res.choices[0].message)); 30 | -------------------------------------------------------------------------------- /gpt4all-bindings/typescript/spec/embed-jsonl.mjs: -------------------------------------------------------------------------------- 1 | import { loadModel, createEmbedding } from '../src/gpt4all.js' 2 | import { createGunzip, createGzip, createUnzip } from 'node:zlib'; 3 | import { Readable } from 'stream' 4 | import readline from 'readline' 5 | const embedder = await loadModel("nomic-embed-text-v1.5.f16.gguf", { verbose: true, type: 'embedding', device: 'gpu' }) 6 | console.log("Running with", embedder.llm.threadCount(), "threads"); 7 | 8 | 9 | const unzip = createGunzip(); 10 | const url = "https://huggingface.co/datasets/sentence-transformers/embedding-training-data/resolve/main/squad_pairs.jsonl.gz" 11 | const stream = await fetch(url) 12 | .then(res => Readable.fromWeb(res.body)); 13 | 14 | const lineReader = readline.createInterface({ 15 | input: stream.pipe(unzip), 16 | crlfDelay: Infinity 17 | }) 18 | 19 | lineReader.on('line', line => { 20 | //pairs of questions and answers 21 | const question_answer = JSON.parse(line) 22 | console.log(createEmbedding(embedder, question_answer)) 23 | }) 24 | 25 | lineReader.on('close', () => embedder.dispose()) 26 | 27 | -------------------------------------------------------------------------------- /gpt4all-bindings/typescript/spec/embed.mjs: 
-------------------------------------------------------------------------------- 1 | import { loadModel, createEmbedding } from '../src/gpt4all.js' 2 | 3 | const embedder = await loadModel("nomic-embed-text-v1.5.f16.gguf", { verbose: true, type: 'embedding', device: 'gpu' }) 4 | 5 | try { 6 | console.log(createEmbedding(embedder, ["Accept your current situation", "12312"], { prefix: "search_document" })) 7 | 8 | } catch(e) { 9 | console.log(e) 10 | } 11 | 12 | embedder.dispose() 13 | -------------------------------------------------------------------------------- /gpt4all-bindings/typescript/spec/llmodel.mjs: -------------------------------------------------------------------------------- 1 | import { 2 | LLModel, 3 | createCompletion, 4 | DEFAULT_DIRECTORY, 5 | DEFAULT_LIBRARIES_DIRECTORY, 6 | loadModel, 7 | } from "../src/gpt4all.js"; 8 | 9 | const model = await loadModel("mistral-7b-openorca.gguf2.Q4_0.gguf", { 10 | verbose: true, 11 | device: "gpu", 12 | }); 13 | const ll = model.llm; 14 | 15 | try { 16 | class Extended extends LLModel {} 17 | } catch (e) { 18 | console.log("Extending from native class gone wrong " + e); 19 | } 20 | 21 | console.log("state size " + ll.stateSize()); 22 | 23 | console.log("thread count " + ll.threadCount()); 24 | ll.setThreadCount(5); 25 | 26 | console.log("thread count " + ll.threadCount()); 27 | ll.setThreadCount(4); 28 | console.log("thread count " + ll.threadCount()); 29 | console.log("name " + ll.name()); 30 | console.log("type: " + ll.type()); 31 | console.log("Default directory for models", DEFAULT_DIRECTORY); 32 | console.log("Default directory for libraries", DEFAULT_LIBRARIES_DIRECTORY); 33 | console.log("Has GPU", ll.hasGpuDevice()); 34 | console.log("gpu devices", ll.listGpu()); 35 | console.log("Required Mem in bytes", ll.memoryNeeded()); 36 | 37 | // to ingest a custom system prompt without using a chat session. 38 | await createCompletion( 39 | model, 40 | "<|im_start|>system\nYou are an advanced mathematician.\n<|im_end|>\n", 41 | { 42 | promptTemplate: "%1", 43 | nPredict: 0, 44 | special: true, 45 | } 46 | ); 47 | const completion1 = await createCompletion(model, "What is 1 + 1?", { 48 | verbose: true, 49 | }); 50 | console.log(`🤖 > ${completion1.choices[0].message.content}`); 51 | //Very specific: 52 | // tested on Ubuntu 22.04, Linux Mint, if I set nPast to 100, the app hangs. 53 | const completion2 = await createCompletion(model, "And if we add two?", { 54 | verbose: true, 55 | }); 56 | console.log(`🤖 > ${completion2.choices[0].message.content}`); 57 | 58 | //CALLING DISPOSE WILL INVALIDATE THE NATIVE MODEL. 
USE THIS TO CLEANUP 59 | model.dispose(); 60 | 61 | console.log("model disposed, exiting..."); 62 | -------------------------------------------------------------------------------- /gpt4all-bindings/typescript/spec/long-context.mjs: -------------------------------------------------------------------------------- 1 | import { promises as fs } from "node:fs"; 2 | import { loadModel, createCompletion } from "../src/gpt4all.js"; 3 | 4 | const model = await loadModel("Nous-Hermes-2-Mistral-7B-DPO.Q4_0.gguf", { 5 | verbose: true, 6 | device: "gpu", 7 | nCtx: 32768, 8 | }); 9 | 10 | const typeDefSource = await fs.readFile("./src/gpt4all.d.ts", "utf-8"); 11 | 12 | const res = await createCompletion( 13 | model, 14 | "Here are the type definitions for the GPT4All API:\n\n" + 15 | typeDefSource + 16 | "\n\nHow do I create a completion with a really large context window?", 17 | { 18 | verbose: true, 19 | } 20 | ); 21 | console.debug(res.choices[0].message); 22 | -------------------------------------------------------------------------------- /gpt4all-bindings/typescript/spec/model-switching.mjs: -------------------------------------------------------------------------------- 1 | import { loadModel, createCompletion } from "../src/gpt4all.js"; 2 | 3 | const model1 = await loadModel("Nous-Hermes-2-Mistral-7B-DPO.Q4_0.gguf", { 4 | device: "gpu", 5 | nCtx: 4096, 6 | }); 7 | 8 | const chat1 = await model1.createChatSession({ 9 | temperature: 0.8, 10 | topP: 0.7, 11 | topK: 60, 12 | }); 13 | 14 | const chat1turn1 = await createCompletion( 15 | chat1, 16 | "Outline a short story concept for adults. About why bananas are rather blue than bread is green at night sometimes. Not too long." 17 | ); 18 | console.debug(chat1turn1.choices[0].message); 19 | 20 | const chat1turn2 = await createCompletion( 21 | chat1, 22 | "Lets sprinkle some plot twists. And a cliffhanger at the end." 23 | ); 24 | console.debug(chat1turn2.choices[0].message); 25 | 26 | const chat1turn3 = await createCompletion( 27 | chat1, 28 | "Analyze your plot. Find the weak points." 29 | ); 30 | console.debug(chat1turn3.choices[0].message); 31 | 32 | const chat1turn4 = await createCompletion( 33 | chat1, 34 | "Rewrite it based on the analysis." 35 | ); 36 | console.debug(chat1turn4.choices[0].message); 37 | 38 | model1.dispose(); 39 | 40 | const model2 = await loadModel("gpt4all-falcon-newbpe-q4_0.gguf", { 41 | device: "gpu", 42 | }); 43 | 44 | const chat2 = await model2.createChatSession({ 45 | messages: chat1.messages, 46 | }); 47 | 48 | const chat2turn1 = await createCompletion( 49 | chat2, 50 | "Give three ideas how this plot could be improved." 51 | ); 52 | console.debug(chat2turn1.choices[0].message); 53 | 54 | const chat2turn2 = await createCompletion( 55 | chat2, 56 | "Revise the plot, applying your ideas." 
57 | ); 58 | console.debug(chat2turn2.choices[0].message); 59 | 60 | model2.dispose(); 61 | -------------------------------------------------------------------------------- /gpt4all-bindings/typescript/spec/stateless.mjs: -------------------------------------------------------------------------------- 1 | import { loadModel, createCompletion } from "../src/gpt4all.js"; 2 | 3 | const model = await loadModel("orca-mini-3b-gguf2-q4_0.gguf", { 4 | verbose: true, 5 | device: "gpu", 6 | }); 7 | 8 | const messages = [ 9 | { 10 | role: "system", 11 | content: "<|im_start|>system\nYou are an advanced mathematician.\n<|im_end|>\n", 12 | }, 13 | { 14 | role: "user", 15 | content: "What's 2+2?", 16 | }, 17 | { 18 | role: "assistant", 19 | content: "5", 20 | }, 21 | { 22 | role: "user", 23 | content: "Are you sure?", 24 | }, 25 | ]; 26 | 27 | 28 | const res1 = await createCompletion(model, messages); 29 | console.debug(res1.choices[0].message); 30 | messages.push(res1.choices[0].message); 31 | 32 | messages.push({ 33 | role: "user", 34 | content: "Could you double check that?", 35 | }); 36 | 37 | const res2 = await createCompletion(model, messages); 38 | console.debug(res2.choices[0].message); 39 | messages.push(res2.choices[0].message); 40 | 41 | messages.push({ 42 | role: "user", 43 | content: "Let's bring out the big calculators.", 44 | }); 45 | 46 | const res3 = await createCompletion(model, messages); 47 | console.debug(res3.choices[0].message); 48 | messages.push(res3.choices[0].message); 49 | 50 | // console.debug(messages); 51 | -------------------------------------------------------------------------------- /gpt4all-bindings/typescript/spec/streaming.mjs: -------------------------------------------------------------------------------- 1 | import { 2 | loadModel, 3 | createCompletion, 4 | createCompletionStream, 5 | createCompletionGenerator, 6 | } from "../src/gpt4all.js"; 7 | 8 | const model = await loadModel("mistral-7b-openorca.gguf2.Q4_0.gguf", { 9 | device: "gpu", 10 | }); 11 | 12 | process.stdout.write("### Stream:"); 13 | const stream = createCompletionStream(model, "How are you?"); 14 | stream.tokens.on("data", (data) => { 15 | process.stdout.write(data); 16 | }); 17 | await stream.result; 18 | process.stdout.write("\n"); 19 | 20 | process.stdout.write("### Stream with pipe:"); 21 | const stream2 = createCompletionStream( 22 | model, 23 | "Please say something nice about node streams." 
24 | ); 25 | stream2.tokens.pipe(process.stdout); 26 | const stream2Res = await stream2.result; 27 | process.stdout.write("\n"); 28 | 29 | process.stdout.write("### Generator:"); 30 | const gen = createCompletionGenerator(model, "generators instead?", { 31 | nPast: stream2Res.usage.n_past_tokens, 32 | }); 33 | for await (const chunk of gen) { 34 | process.stdout.write(chunk); 35 | } 36 | 37 | process.stdout.write("\n"); 38 | 39 | process.stdout.write("### Callback:"); 40 | await createCompletion(model, "Why not just callbacks?", { 41 | onResponseToken: (tokenId, token) => { 42 | process.stdout.write(token); 43 | }, 44 | }); 45 | process.stdout.write("\n"); 46 | 47 | process.stdout.write("### 2nd Generator:"); 48 | const gen2 = createCompletionGenerator(model, "If 3 + 3 is 5, what is 2 + 2?"); 49 | 50 | let chunk = await gen2.next(); 51 | while (!chunk.done) { 52 | process.stdout.write(chunk.value); 53 | chunk = await gen2.next(); 54 | } 55 | process.stdout.write("\n"); 56 | console.debug("generator finished", chunk); 57 | model.dispose(); 58 | -------------------------------------------------------------------------------- /gpt4all-bindings/typescript/spec/system.mjs: -------------------------------------------------------------------------------- 1 | import { 2 | loadModel, 3 | createCompletion, 4 | } from "../src/gpt4all.js"; 5 | 6 | const model = await loadModel("Nous-Hermes-2-Mistral-7B-DPO.Q4_0.gguf", { 7 | verbose: true, 8 | device: "gpu", 9 | }); 10 | 11 | const chat = await model.createChatSession({ 12 | verbose: true, 13 | systemPrompt: "<|im_start|>system\nRoleplay as Batman. Answer as if you are Batman, never say you're an Assistant.\n<|im_end|>", 14 | }); 15 | const turn1 = await createCompletion(chat, "You have any plans tonight?"); 16 | console.log(turn1.choices[0].message); 17 | // "I'm afraid I must decline any personal invitations tonight. As Batman, I have a responsibility to protect Gotham City." 18 | 19 | model.dispose(); 20 | -------------------------------------------------------------------------------- /gpt4all-bindings/typescript/src/config.js: -------------------------------------------------------------------------------- 1 | const os = require("node:os"); 2 | const path = require("node:path"); 3 | 4 | const DEFAULT_DIRECTORY = path.resolve(os.homedir(), ".cache/gpt4all"); 5 | 6 | const librarySearchPaths = [ 7 | path.join(DEFAULT_DIRECTORY, "libraries"), 8 | path.resolve("./libraries"), 9 | path.resolve( 10 | __dirname, 11 | "..", 12 | `runtimes/${process.platform}-${process.arch}/native`, 13 | ), 14 | //for darwin. 
This is hardcoded for now but it should work 15 | path.resolve( 16 | __dirname, 17 | "..", 18 | `runtimes/${process.platform}/native`, 19 | ), 20 | process.cwd(), 21 | ]; 22 | 23 | const DEFAULT_LIBRARIES_DIRECTORY = librarySearchPaths.join(";"); 24 | 25 | const DEFAULT_MODEL_CONFIG = { 26 | systemPrompt: "", 27 | promptTemplate: "### Human:\n%1\n\n### Assistant:\n", 28 | } 29 | 30 | const DEFAULT_MODEL_LIST_URL = "https://gpt4all.io/models/models3.json"; 31 | 32 | const DEFAULT_PROMPT_CONTEXT = { 33 | temp: 0.1, 34 | topK: 40, 35 | topP: 0.9, 36 | minP: 0.0, 37 | repeatPenalty: 1.18, 38 | repeatLastN: 10, 39 | nBatch: 100, 40 | } 41 | 42 | module.exports = { 43 | DEFAULT_DIRECTORY, 44 | DEFAULT_LIBRARIES_DIRECTORY, 45 | DEFAULT_MODEL_CONFIG, 46 | DEFAULT_MODEL_LIST_URL, 47 | DEFAULT_PROMPT_CONTEXT, 48 | }; 49 | -------------------------------------------------------------------------------- /gpt4all-bindings/typescript/test/models.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "order": "a", 4 | "md5sum": "08d6c05a21512a79a1dfeb9d2a8f262f", 5 | "name": "Not a real model", 6 | "filename": "fake-model.gguf", 7 | "filesize": "4", 8 | "systemPrompt": " " 9 | } 10 | ] 11 | -------------------------------------------------------------------------------- /gpt4all-chat/.flake8: -------------------------------------------------------------------------------- 1 | # vim: set syntax=dosini: 2 | [flake8] 3 | exclude = .*,__pycache__ 4 | max-line-length = 120 5 | extend-ignore = B001,C408,D,DAR,E221,E303,E722,E741,E800,N801,N806,P101,S101,S324,S404,S406,S410,S603,WPS100,WPS110,WPS111,WPS113,WPS114,WPS115,WPS120,WPS2,WPS300,WPS301,WPS304,WPS305,WPS306,WPS309,WPS316,WPS317,WPS318,WPS319,WPS322,WPS323,WPS326,WPS329,WPS330,WPS332,WPS336,WPS337,WPS347,WPS360,WPS361,WPS407,WPS414,WPS420,WPS421,WPS429,WPS430,WPS431,WPS432,WPS433,WPS437,WPS440,WPS440,WPS441,WPS442,WPS457,WPS458,WPS460,WPS462,WPS463,WPS473,WPS501,WPS504,WPS505,WPS508,WPS509,WPS510,WPS515,WPS516,WPS519,WPS520,WPS529,WPS531,WPS602,WPS604,WPS605,WPS608,WPS609,WPS613,WPS615 6 | -------------------------------------------------------------------------------- /gpt4all-chat/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2023-2024 Nomic, Inc. 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 4 | 5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 6 | 7 | THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 8 | 9 | ADDENDUM: 10 | 11 | Any LLM models that are loaded and used by the application are not themselves 12 | subject to this license if indeed they are even copyrightable. 
The terms of 13 | this license apply only to the application software and its accompanying 14 | documentation and do not extend to any LLM models, whether created by the 15 | author of the application or obtained from third-party sources. 16 | -------------------------------------------------------------------------------- /gpt4all-chat/cmake/Modules/SignMacOSBinaries.cmake: -------------------------------------------------------------------------------- 1 | function(install_sign_osx tgt) 2 | install(CODE "execute_process(COMMAND codesign --options runtime --timestamp -s \"${MAC_SIGNING_IDENTITY}\" $)") 3 | endfunction() -------------------------------------------------------------------------------- /gpt4all-chat/cmake/Modules/SignWindowsBinaries.cmake: -------------------------------------------------------------------------------- 1 | function(sign_target_windows tgt) 2 | if(WIN32 AND GPT4ALL_SIGN_INSTALL) 3 | add_custom_command(TARGET ${tgt} 4 | POST_BUILD 5 | COMMAND AzureSignTool.exe sign 6 | -du "https://www.nomic.ai/gpt4all" 7 | -kvu https://gpt4all.vault.azure.net 8 | -kvi "$Env{AZSignGUID}" 9 | -kvs "$Env{AZSignPWD}" 10 | -kvc "$Env{AZSignCertName}" 11 | -kvt "$Env{AZSignTID}" 12 | -tr http://timestamp.digicert.com 13 | -v 14 | $ 15 | ) 16 | endif() 17 | endfunction() 18 | -------------------------------------------------------------------------------- /gpt4all-chat/cmake/cpack-steal-config.cmake.in: -------------------------------------------------------------------------------- 1 | set(OUTPUT_DIR "@CMAKE_BINARY_DIR@") 2 | file(COPY ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/config DESTINATION ${OUTPUT_DIR}/cpack-config) 3 | -------------------------------------------------------------------------------- /gpt4all-chat/cmake/deploy-qt-linux.cmake.in: -------------------------------------------------------------------------------- 1 | set(LINUXDEPLOYQT "@LINUXDEPLOYQT@") 2 | set(COMPONENT_NAME_MAIN "@COMPONENT_NAME_MAIN@") 3 | set(CMAKE_CURRENT_SOURCE_DIR "@CMAKE_CURRENT_SOURCE_DIR@") 4 | set(DATA_DIR ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data) 5 | set(BIN_DIR ${DATA_DIR}/bin) 6 | set(Qt6_ROOT_DIR "@Qt6_ROOT_DIR@") 7 | set(ENV{LD_LIBRARY_PATH} "${BIN_DIR}:${Qt6_ROOT_DIR}/../lib/") 8 | execute_process(COMMAND ${LINUXDEPLOYQT} ${BIN_DIR}/chat -qmldir=${CMAKE_CURRENT_SOURCE_DIR} -bundle-non-qt-libs -qmake=${Qt6_ROOT_DIR}/bin/qmake -verbose=2 -exclude-libs=libcuda.so.1) 9 | file(COPY "${CMAKE_CURRENT_SOURCE_DIR}/icons/gpt4all-32.png" 10 | DESTINATION ${DATA_DIR}) 11 | file(COPY "${CMAKE_CURRENT_SOURCE_DIR}/icons/gpt4all-48.png" 12 | DESTINATION ${DATA_DIR}) 13 | -------------------------------------------------------------------------------- /gpt4all-chat/cmake/deploy-qt-mac.cmake.in: -------------------------------------------------------------------------------- 1 | set(MACDEPLOYQT "@MACDEPLOYQT@") 2 | set(COMPONENT_NAME_MAIN "@COMPONENT_NAME_MAIN@") 3 | set(CMAKE_CURRENT_SOURCE_DIR "@CMAKE_CURRENT_SOURCE_DIR@") 4 | set(GPT4ALL_SIGN_INSTALL "@GPT4ALL_SIGN_INSTALL@") 5 | set(GPT4ALL_SIGNING_ID "@MAC_SIGNING_IDENTITY@") 6 | set(CPACK_CONFIG_DIR "@CMAKE_BINARY_DIR@") 7 | if (GPT4ALL_SIGN_INSTALL) 8 | set(MAC_NOTARIZE -sign-for-notarization=${GPT4ALL_SIGNING_ID}) 9 | endif() 10 | execute_process(COMMAND ${MACDEPLOYQT} ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/bin/gpt4all.app -qmldir=${CMAKE_CURRENT_SOURCE_DIR} -verbose=2 ${MAC_NOTARIZE}) 11 | file(COPY "${CMAKE_CURRENT_SOURCE_DIR}/icons/gpt4all-32.png" 12 | DESTINATION 
${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data) 13 | file(COPY "${CMAKE_CURRENT_SOURCE_DIR}/icons/gpt4all-48.png" 14 | DESTINATION ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data) 15 | file(COPY "${CMAKE_CURRENT_SOURCE_DIR}/resources/gpt4all.icns" 16 | DESTINATION ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data) 17 | 18 | if (GPT4ALL_SIGN_INSTALL) 19 | # Create signed MaintenanceTool 20 | set(MT_DATA_DIR ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/maintenancetool/data) 21 | file(MAKE_DIRECTORY ${MT_DATA_DIR}) 22 | execute_process( 23 | COMMAND binarycreator --config ${CPACK_CONFIG_DIR}/cpack-config/config/config.xml --create-maintenancetool --sign ${GPT4ALL_SIGNING_ID} 24 | WORKING_DIRECTORY ${MT_DATA_DIR} 25 | ) 26 | endif() 27 | -------------------------------------------------------------------------------- /gpt4all-chat/cmake/deploy-qt-windows.cmake.in: -------------------------------------------------------------------------------- 1 | set(WINDEPLOYQT "@WINDEPLOYQT@") 2 | set(COMPONENT_NAME_MAIN "@COMPONENT_NAME_MAIN@") 3 | set(CMAKE_CURRENT_SOURCE_DIR "@CMAKE_CURRENT_SOURCE_DIR@") 4 | execute_process(COMMAND ${WINDEPLOYQT} --qmldir ${CMAKE_CURRENT_SOURCE_DIR} ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/bin) 5 | file(COPY "${CMAKE_CURRENT_SOURCE_DIR}/icons/gpt4all-32.png" 6 | DESTINATION ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data) 7 | file(COPY "${CMAKE_CURRENT_SOURCE_DIR}/icons/gpt4all-48.png" 8 | DESTINATION ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data) 9 | file(COPY "${CMAKE_CURRENT_SOURCE_DIR}/resources/gpt4all.ico" 10 | DESTINATION ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data) 11 | -------------------------------------------------------------------------------- /gpt4all-chat/cmake/download_model.cmake: -------------------------------------------------------------------------------- 1 | if(NOT DEFINED URL OR NOT DEFINED OUTPUT_PATH OR NOT DEFINED EXPECTED_MD5) 2 | message(FATAL_ERROR "Usage: cmake -DURL= -DOUTPUT_PATH= -DEXPECTED_MD5= -P download_model.cmake") 3 | endif() 4 | 5 | message(STATUS "Downloading model from ${URL} to ${OUTPUT_PATH} ...") 6 | 7 | file(DOWNLOAD "${URL}" "${OUTPUT_PATH}" EXPECTED_MD5 "${EXPECTED_MD5}" STATUS status) 8 | 9 | list(GET status 0 status_code) 10 | if(NOT status_code EQUAL 0) 11 | message(FATAL_ERROR "Failed to download model: ${status}") 12 | endif() 13 | -------------------------------------------------------------------------------- /gpt4all-chat/cmake/installer_control.qs: -------------------------------------------------------------------------------- 1 | var finishedText = null; 2 | 3 | function cancelInstaller(message) { 4 | installer.setDefaultPageVisible(QInstaller.Introduction, false); 5 | installer.setDefaultPageVisible(QInstaller.TargetDirectory, false); 6 | installer.setDefaultPageVisible(QInstaller.ComponentSelection, false); 7 | installer.setDefaultPageVisible(QInstaller.ReadyForInstallation, false); 8 | installer.setDefaultPageVisible(QInstaller.StartMenuSelection, false); 9 | installer.setDefaultPageVisible(QInstaller.PerformInstallation, false); 10 | installer.setDefaultPageVisible(QInstaller.LicenseCheck, false); 11 | finishedText = message; 12 | installer.setCanceled(); 13 | } 14 | 15 | function vercmp(a, b) { 16 | return a.localeCompare(b, undefined, { numeric: true, sensitivity: "base" }); 17 | } 18 | 19 | function Controller() 
{ 20 | } 21 | 22 | Controller.prototype.TargetDirectoryPageCallback = function() { 23 | var failedReq = null; 24 | if (systemInfo.productType === "ubuntu" && vercmp(systemInfo.productVersion, "22.04") < 0) { 25 | failedReq = "Ubuntu 22.04 LTS"; 26 | } else if (systemInfo.productType === "macos" && vercmp(systemInfo.productVersion, "12.6") < 0) { 27 | failedReq = "macOS Monterey 12.6"; 28 | } 29 | 30 | if (failedReq !== null) { 31 | cancelInstaller( 32 | "Installation cannot continue because GPT4All does not support your operating system: " + 33 | `${systemInfo.prettyProductName}<br/><br/>` + 34 | `GPT4All requires ${failedReq} or newer.` 35 | ); 36 | } 37 | } 38 | 39 | Controller.prototype.FinishedPageCallback = function() { 40 | const widget = gui.currentPageWidget(); 41 | if (widget != null && finishedText != null) { 42 | widget.MessageLabel.setText(finishedText); 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /gpt4all-chat/cmake/installer_maintenancetool_component.qs: -------------------------------------------------------------------------------- 1 | function Component() 2 | { 3 | component.ifwVersion = installer.value("FrameworkVersion"); 4 | installer.installationStarted.connect(this, Component.prototype.onInstallationStarted); 5 | } 6 | 7 | Component.prototype.onInstallationStarted = function() 8 | { 9 | if (component.updateRequested() || component.installationRequested()) { 10 | if (installer.value("os") == "win") { 11 | component.installerbaseBinaryPath = "@TargetDir@/installerbase.exe"; 12 | } else if (installer.value("os") == "x11") { 13 | component.installerbaseBinaryPath = "@TargetDir@/installerbase"; 14 | } else if (installer.value("os") == "mac") { 15 | component.installerbaseBinaryPath = "@TargetDir@/MaintenanceTool.app"; 16 | } 17 | installer.setInstallerBaseBinary(component.installerbaseBinaryPath); 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /gpt4all-chat/deps/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | include(FetchContent) 2 | 3 | 4 | set(BUILD_SHARED_LIBS OFF) 5 | 6 | set(FMT_INSTALL OFF) 7 | add_subdirectory(fmt) 8 | 9 | set(QAPPLICATION_CLASS QApplication) 10 | add_subdirectory(SingleApplication) 11 | 12 | set(DUCKX_INSTALL OFF) 13 | add_subdirectory(DuckX) 14 | 15 | set(QT_VERSION_MAJOR 6) 16 | add_subdirectory(QXlsx/QXlsx) 17 | 18 | if (NOT GPT4ALL_USING_QTPDF) 19 | # If we do not use QtPDF, we need to get PDFium. 
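# The FetchContent_Declare blocks below fetch prebuilt PDFium archives instead of
# building PDFium from source; each URL_HASH pins the artifact's SHA256, so a
# tampered or truncated download fails at configure time rather than being used silently.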
20 | set(GPT4ALL_PDFIUM_TAG "chromium/6996") 21 | if (CMAKE_SYSTEM_NAME MATCHES Linux) 22 | FetchContent_Declare( 23 | pdfium 24 | URL "https://github.com/bblanchon/pdfium-binaries/releases/download/${GPT4ALL_PDFIUM_TAG}/pdfium-linux-x64.tgz" 25 | URL_HASH "SHA256=68b381b87efed539f2e33ae1e280304c9a42643a878cc296c1d66a93b0cb4335" 26 | ) 27 | elseif (CMAKE_SYSTEM_NAME MATCHES Windows) 28 | if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(x86_64|AMD64|amd64)$") 29 | FetchContent_Declare( 30 | pdfium 31 | URL "https://github.com/bblanchon/pdfium-binaries/releases/download/${GPT4ALL_PDFIUM_TAG}/pdfium-win-x64.tgz" 32 | URL_HASH "SHA256=83e714c302ceacccf403826d5cb57ea39b77f393d83b8d5781283012774a9378" 33 | ) 34 | elseif (CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64|AARCH64|arm64|ARM64)$") 35 | FetchContent_Declare( 36 | pdfium 37 | URL "https://github.com/bblanchon/pdfium-binaries/releases/download/${GPT4ALL_PDFIUM_TAG}/pdfium-win-arm64.tgz" 38 | URL_HASH "SHA256=78e77e871453a4915cbf66fb381b951c9932f88a747c6b2b33c9f27ec2371445" 39 | ) 40 | endif() 41 | elseif (CMAKE_SYSTEM_NAME MATCHES Darwin) 42 | FetchContent_Declare( 43 | pdfium 44 | URL "https://github.com/bblanchon/pdfium-binaries/releases/download/${GPT4ALL_PDFIUM_TAG}/pdfium-mac-univ.tgz" 45 | URL_HASH "SHA256=e7577f3242ff9c1df50025f9615673a43601a201bc51ee4792975f98920793a2" 46 | ) 47 | endif() 48 | 49 | FetchContent_MakeAvailable(pdfium) 50 | find_package(PDFium REQUIRED PATHS "${pdfium_SOURCE_DIR}" NO_DEFAULT_PATH) 51 | endif() 52 | -------------------------------------------------------------------------------- /gpt4all-chat/dev-requirements.txt: -------------------------------------------------------------------------------- 1 | -r test-requirements.txt 2 | 3 | # dev tools 4 | flake8~=7.1 5 | mypy~=1.12 6 | pytype>=2024.10.11 7 | wemake-python-styleguide~=0.19.2 8 | 9 | # type stubs and other optional modules 10 | types-requests~=2.32 11 | urllib3[socks] 12 | -------------------------------------------------------------------------------- /gpt4all-chat/flatpak-manifest/io.gpt4all.gpt4all.desktop: -------------------------------------------------------------------------------- 1 | [Desktop Entry] 2 | Name=GPT4ALL 3 | GenericName=Open-source assistant-style large language models that run locally on your CPU 4 | Comment=Run any GPT4All model natively on your home desktop with the auto-updating desktop chat client. See GPT4All Website for a full list of open-source models you can run with this powerful desktop application. 
5 | Exec=chat 6 | Icon=io.gpt4all.gpt4all 7 | Type=Application 8 | Categories=Utility;Office; 9 | Keywords=GPT,Chat;AI 10 | -------------------------------------------------------------------------------- /gpt4all-chat/flatpak-manifest/screenshots/chat.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-chat/flatpak-manifest/screenshots/chat.png -------------------------------------------------------------------------------- /gpt4all-chat/flatpak-manifest/screenshots/models.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-chat/flatpak-manifest/screenshots/models.png -------------------------------------------------------------------------------- /gpt4all-chat/flatpak-manifest/screenshots/welcome.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-chat/flatpak-manifest/screenshots/welcome.png -------------------------------------------------------------------------------- /gpt4all-chat/icons/antenna_1.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | -------------------------------------------------------------------------------- /gpt4all-chat/icons/antenna_2.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | -------------------------------------------------------------------------------- /gpt4all-chat/icons/antenna_3.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | -------------------------------------------------------------------------------- /gpt4all-chat/icons/caret_down.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /gpt4all-chat/icons/caret_right.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /gpt4all-chat/icons/changelog.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | -------------------------------------------------------------------------------- /gpt4all-chat/icons/chat.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | -------------------------------------------------------------------------------- /gpt4all-chat/icons/check.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /gpt4all-chat/icons/close.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /gpt4all-chat/icons/copy.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /gpt4all-chat/icons/db.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 
-------------------------------------------------------------------------------- /gpt4all-chat/icons/download.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /gpt4all-chat/icons/edit.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /gpt4all-chat/icons/eject.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /gpt4all-chat/icons/email.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /gpt4all-chat/icons/file-doc.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /gpt4all-chat/icons/file-docx.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /gpt4all-chat/icons/file-md.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /gpt4all-chat/icons/file-pdf.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /gpt4all-chat/icons/file-txt.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /gpt4all-chat/icons/file-xls.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /gpt4all-chat/icons/file.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /gpt4all-chat/icons/globe.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | -------------------------------------------------------------------------------- /gpt4all-chat/icons/gpt4all-32.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-chat/icons/gpt4all-32.png -------------------------------------------------------------------------------- /gpt4all-chat/icons/gpt4all-48.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-chat/icons/gpt4all-48.png -------------------------------------------------------------------------------- /gpt4all-chat/icons/groq.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | -------------------------------------------------------------------------------- /gpt4all-chat/icons/home.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | -------------------------------------------------------------------------------- 
/gpt4all-chat/icons/image.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /gpt4all-chat/icons/info.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | -------------------------------------------------------------------------------- /gpt4all-chat/icons/left_panel_closed.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | -------------------------------------------------------------------------------- /gpt4all-chat/icons/left_panel_open.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | -------------------------------------------------------------------------------- /gpt4all-chat/icons/local-docs.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | -------------------------------------------------------------------------------- /gpt4all-chat/icons/models.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | -------------------------------------------------------------------------------- /gpt4all-chat/icons/network.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /gpt4all-chat/icons/nomic_logo.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | -------------------------------------------------------------------------------- /gpt4all-chat/icons/notes.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /gpt4all-chat/icons/openai.svg: -------------------------------------------------------------------------------- 1 | 2 | OpenAI icon -------------------------------------------------------------------------------- /gpt4all-chat/icons/paperclip.svg: -------------------------------------------------------------------------------- 1 | 2 | 12 | 14 | 32 | 37 | 45 | 46 | -------------------------------------------------------------------------------- /gpt4all-chat/icons/plus.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /gpt4all-chat/icons/plus_circle.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /gpt4all-chat/icons/question.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /gpt4all-chat/icons/recycle.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /gpt4all-chat/icons/redo.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /gpt4all-chat/icons/regenerate.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- 
/gpt4all-chat/icons/search.svg: -------------------------------------------------------------------------------- /gpt4all-chat/icons/send_message.svg: -------------------------------------------------------------------------------- /gpt4all-chat/icons/stack.svg: -------------------------------------------------------------------------------- /gpt4all-chat/icons/stop_generating.svg: -------------------------------------------------------------------------------- /gpt4all-chat/icons/thumbs_down.svg: -------------------------------------------------------------------------------- /gpt4all-chat/icons/thumbs_up.svg: -------------------------------------------------------------------------------- /gpt4all-chat/icons/trash.svg: -------------------------------------------------------------------------------- /gpt4all-chat/icons/twitter.svg: -------------------------------------------------------------------------------- /gpt4all-chat/icons/undo.svg: -------------------------------------------------------------------------------- /gpt4all-chat/icons/up_down.svg: -------------------------------------------------------------------------------- /gpt4all-chat/icons/webpage.svg: -------------------------------------------------------------------------------- /gpt4all-chat/metadata/latestnews.md: -------------------------------------------------------------------------------- 1 | ## Latest News 2 | 3 | GPT4All v3.10.0 was released on February 24th. Changes include: 4 | 5 | * **Remote Models:** 6 | * The Add Model page now has a dedicated tab for remote model providers. 7 | * Groq, OpenAI, and Mistral remote models are now easier to configure. 8 | * **CUDA Compatibility:** GPUs with CUDA compute capability 5.0 such as the GTX 750 are now supported by the CUDA backend. 9 | * **New Model:** The non-MoE Granite model is now supported. 10 | * **Translation Updates:** 11 | * The Italian translation has been updated. 12 | * The Simplified Chinese translation has been significantly improved. 13 | * **Better Chat Templates:** The default chat templates for OLMoE 7B 0924/0125 and Granite 3.1 3B/8B have been improved. 14 | * **Whitespace Fixes:** DeepSeek-R1-based models now have better whitespace behavior in their output. 15 | * **Crash Fixes:** Several issues that could potentially cause GPT4All to crash have been fixed.
16 | -------------------------------------------------------------------------------- /gpt4all-chat/pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.pytest.ini_options] 2 | addopts = ['--import-mode=importlib'] 3 | 4 | [tool.mypy] 5 | files = 'tests/python' 6 | pretty = true 7 | strict = true 8 | warn_unused_ignores = false 9 | 10 | [tool.pytype] 11 | inputs = ['tests/python'] 12 | jobs = 'auto' 13 | bind_decorated_methods = true 14 | none_is_not_bool = true 15 | overriding_renamed_parameter_count_checks = true 16 | strict_none_binding = true 17 | precise_return = true 18 | # protocols: 19 | # - https://github.com/google/pytype/issues/1423 20 | # - https://github.com/google/pytype/issues/1424 21 | strict_import = true 22 | strict_parameter_checks = true 23 | strict_primitive_comparisons = true 24 | # strict_undefined_checks: too many false positives 25 | 26 | [tool.isort] 27 | src_paths = ['tests/python'] 28 | line_length = 120 29 | combine_as_imports = true 30 | -------------------------------------------------------------------------------- /gpt4all-chat/qml/ChatMessageButton.qml: -------------------------------------------------------------------------------- 1 | import QtQuick 2 | import QtQuick.Controls 3 | 4 | import gpt4all 5 | 6 | MyToolButton { 7 | property string name 8 | 9 | width: 24 10 | height: 24 11 | imageWidth: width 12 | imageHeight: height 13 | ToolTip { 14 | visible: parent.hovered 15 | y: parent.height * 1.5 16 | text: name 17 | delay: Qt.styleHints.mousePressAndHoldInterval 18 | } 19 | Accessible.name: name 20 | } 21 | -------------------------------------------------------------------------------- /gpt4all-chat/qml/ConfirmationDialog.qml: -------------------------------------------------------------------------------- 1 | import QtCore 2 | import QtQuick 3 | import QtQuick.Controls 4 | import QtQuick.Controls.Basic 5 | import QtQuick.Layouts 6 | 7 | MyDialog { 8 | id: confirmationDialog 9 | anchors.centerIn: parent 10 | modal: true 11 | padding: 20 12 | property alias dialogTitle: titleText.text 13 | property alias description: descriptionText.text 14 | 15 | Theme { id: theme } 16 | 17 | contentItem: ColumnLayout { 18 | Text { 19 | id: titleText 20 | Layout.alignment: Qt.AlignHCenter 21 | textFormat: Text.StyledText 22 | color: theme.textColor 23 | font.pixelSize: theme.fontSizeLarger 24 | font.bold: true 25 | } 26 | 27 | Text { 28 | id: descriptionText 29 | Layout.alignment: Qt.AlignHCenter 30 | textFormat: Text.StyledText 31 | color: theme.textColor 32 | font.pixelSize: theme.fontSizeMedium 33 | } 34 | } 35 | 36 | footer: DialogButtonBox { 37 | id: dialogBox 38 | padding: 20 39 | alignment: Qt.AlignRight 40 | spacing: 10 41 | MySettingsButton { 42 | text: qsTr("OK") 43 | textColor: theme.mediumButtonText 44 | backgroundColor: theme.mediumButtonBackground 45 | backgroundColorHovered: theme.mediumButtonBackgroundHovered 46 | DialogButtonBox.buttonRole: DialogButtonBox.AcceptRole 47 | } 48 | MySettingsButton { 49 | text: qsTr("Cancel") 50 | DialogButtonBox.buttonRole: DialogButtonBox.RejectRole 51 | } 52 | background: Rectangle { 53 | color: "transparent" 54 | } 55 | Keys.onEnterPressed: confirmationDialog.accept() 56 | Keys.onReturnPressed: confirmationDialog.accept() 57 | } 58 | Component.onCompleted: dialogBox.forceActiveFocus() 59 | } 60 | -------------------------------------------------------------------------------- /gpt4all-chat/qml/MyBusyIndicator.qml: 
-------------------------------------------------------------------------------- 1 | import QtQuick 2 | import QtQuick.Controls 3 | import QtQuick.Controls.Basic 4 | 5 | BusyIndicator { 6 | id: control 7 | 8 | property real size: 48 9 | property color color: theme.accentColor 10 | 11 | contentItem: Item { 12 | implicitWidth: control.size 13 | implicitHeight: control.size 14 | 15 | Item { 16 | id: item 17 | x: parent.width / 2 - width / 2 18 | y: parent.height / 2 - height / 2 19 | width: control.size 20 | height: control.size 21 | opacity: control.running ? 1 : 0 22 | 23 | Behavior on opacity { 24 | OpacityAnimator { 25 | duration: 250 26 | } 27 | } 28 | 29 | RotationAnimator { 30 | target: item 31 | running: control.visible && control.running 32 | from: 0 33 | to: 360 34 | loops: Animation.Infinite 35 | duration: 1750 36 | } 37 | 38 | Repeater { 39 | id: repeater 40 | model: 6 41 | 42 | Rectangle { 43 | id: delegate 44 | x: item.width / 2 - width / 2 45 | y: item.height / 2 - height / 2 46 | implicitWidth: control.size * .2 47 | implicitHeight: control.size * .2 48 | radius: control.size * .1 49 | color: control.color 50 | 51 | required property int index 52 | 53 | transform: [ 54 | Translate { 55 | y: -Math.min(item.width, item.height) * 0.5 + delegate.radius 56 | }, 57 | Rotation { 58 | angle: delegate.index / repeater.count * 360 59 | origin.x: delegate.radius 60 | origin.y: delegate.radius 61 | } 62 | ] 63 | } 64 | } 65 | } 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /gpt4all-chat/qml/MyButton.qml: -------------------------------------------------------------------------------- 1 | import QtCore 2 | import QtQuick 3 | import QtQuick.Controls 4 | import QtQuick.Controls.Basic 5 | import mysettings 6 | import mysettingsenums 7 | 8 | Button { 9 | id: myButton 10 | padding: 10 11 | rightPadding: 18 12 | leftPadding: 18 13 | property color textColor: theme.oppositeTextColor 14 | property color mutedTextColor: theme.oppositeMutedTextColor 15 | property color backgroundColor: theme.buttonBackground 16 | property color backgroundColorHovered: theme.buttonBackgroundHovered 17 | property real backgroundRadius: 10 18 | property real borderWidth: MySettings.chatTheme === MySettingsEnums.ChatTheme.LegacyDark ? 1 : 0 19 | property color borderColor: theme.buttonBorder 20 | property real fontPixelSize: theme.fontSizeLarge 21 | property bool fontPixelBold: false 22 | property alias textAlignment: textContent.horizontalAlignment 23 | 24 | contentItem: Text { 25 | id: textContent 26 | text: myButton.text 27 | horizontalAlignment: myButton.textAlignment 28 | color: myButton.enabled ? textColor : mutedTextColor 29 | font.pixelSize: fontPixelSize 30 | font.bold: fontPixelBold 31 | Accessible.role: Accessible.Button 32 | Accessible.name: text 33 | } 34 | background: Rectangle { 35 | radius: myButton.backgroundRadius 36 | border.width: myButton.borderWidth 37 | border.color: myButton.borderColor 38 | color: !myButton.enabled ? theme.mutedTextColor : myButton.hovered ? 
backgroundColorHovered : backgroundColor 39 | } 40 | Accessible.role: Accessible.Button 41 | Accessible.name: text 42 | ToolTip.delay: Qt.styleHints.mousePressAndHoldInterval 43 | } 44 | -------------------------------------------------------------------------------- /gpt4all-chat/qml/MyCheckBox.qml: -------------------------------------------------------------------------------- 1 | import QtCore 2 | import QtQuick 3 | import QtQuick.Controls 4 | import QtQuick.Controls.Basic 5 | 6 | CheckBox { 7 | id: myCheckBox 8 | 9 | background: Rectangle { 10 | color: "transparent" 11 | } 12 | 13 | indicator: Rectangle { 14 | implicitWidth: 26 15 | implicitHeight: 26 16 | x: myCheckBox.leftPadding 17 | y: parent.height / 2 - height / 2 18 | border.color: theme.checkboxBorder 19 | color: "transparent" 20 | radius: 3 21 | 22 | Rectangle { 23 | width: 14 24 | height: 14 25 | x: 6 26 | y: 6 27 | radius: 2 28 | color: theme.checkboxForeground 29 | visible: myCheckBox.checked 30 | } 31 | } 32 | 33 | contentItem: Text { 34 | text: myCheckBox.text 35 | font: myCheckBox.font 36 | opacity: enabled ? 1.0 : 0.3 37 | color: theme.textColor 38 | verticalAlignment: Text.AlignVCenter 39 | leftPadding: myCheckBox.indicator.width + myCheckBox.spacing 40 | } 41 | ToolTip.delay: Qt.styleHints.mousePressAndHoldInterval 42 | } -------------------------------------------------------------------------------- /gpt4all-chat/qml/MyDialog.qml: -------------------------------------------------------------------------------- 1 | import QtCore 2 | import QtQuick 3 | import QtQuick.Controls 4 | import QtQuick.Controls.Basic 5 | import QtQuick.Dialogs 6 | import QtQuick.Layouts 7 | 8 | Dialog { 9 | id: myDialog 10 | parent: Overlay.overlay 11 | property alias closeButtonVisible: myCloseButton.visible 12 | background: Rectangle { 13 | width: parent.width 14 | height: parent.height 15 | color: theme.containerBackground 16 | border.width: 1 17 | border.color: theme.dialogBorder 18 | radius: 10 19 | } 20 | 21 | Rectangle { 22 | id: closeBackground 23 | visible: myCloseButton.visible 24 | z: 299 25 | anchors.centerIn: myCloseButton 26 | width: myCloseButton.width + 10 27 | height: myCloseButton.height + 10 28 | color: theme.containerBackground 29 | } 30 | 31 | MyToolButton { 32 | id: myCloseButton 33 | x: 0 + myDialog.width - myDialog.padding - width - 15 34 | y: 0 - myDialog.padding + 15 35 | z: 300 36 | visible: myDialog.closePolicy != Popup.NoAutoClose 37 | width: 24 38 | height: 24 39 | imageWidth: 24 40 | imageHeight: 24 41 | padding: 0 42 | source: "qrc:/gpt4all/icons/close.svg" 43 | fillMode: Image.PreserveAspectFit 44 | onClicked: { 45 | myDialog.close(); 46 | } 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /gpt4all-chat/qml/MyDirectoryField.qml: -------------------------------------------------------------------------------- 1 | import QtCore 2 | import QtQuick 3 | import QtQuick.Controls 4 | import QtQuick.Controls.Basic 5 | import llm 6 | 7 | TextField { 8 | id: myDirectoryField 9 | padding: 10 10 | property bool isValid: LLM.directoryExists(text) 11 | color: text === "" || isValid ? 
theme.textColor : theme.textErrorColor 12 | background: Rectangle { 13 | implicitWidth: 150 14 | color: theme.controlBackground 15 | border.width: 1 16 | border.color: theme.controlBorder 17 | radius: 10 18 | } 19 | ToolTip.delay: Qt.styleHints.mousePressAndHoldInterval 20 | } 21 | -------------------------------------------------------------------------------- /gpt4all-chat/qml/MyFancyLink.qml: -------------------------------------------------------------------------------- 1 | import QtCore 2 | import QtQuick 3 | import QtQuick.Controls 4 | import QtQuick.Controls.Basic 5 | import Qt5Compat.GraphicalEffects 6 | import mysettings 7 | 8 | MyButton { 9 | id: fancyLink 10 | property alias imageSource: myimage.source 11 | 12 | Image { 13 | id: myimage 14 | anchors.verticalCenter: parent.verticalCenter 15 | anchors.left: parent.left 16 | anchors.leftMargin: 12 17 | sourceSize: Qt.size(15, 15) 18 | mipmap: true 19 | visible: false 20 | } 21 | 22 | ColorOverlay { 23 | anchors.fill: myimage 24 | source: myimage 25 | color: fancyLink.hovered ? theme.fancyLinkTextHovered : theme.fancyLinkText 26 | } 27 | 28 | borderWidth: 0 29 | backgroundColor: "transparent" 30 | backgroundColorHovered: "transparent" 31 | fontPixelBold: true 32 | leftPadding: 35 33 | rightPadding: 8 34 | topPadding: 1 35 | bottomPadding: 1 36 | textColor: fancyLink.hovered ? theme.fancyLinkTextHovered : theme.fancyLinkText 37 | fontPixelSize: theme.fontSizeSmall 38 | background: Rectangle { 39 | color: "transparent" 40 | } 41 | 42 | Accessible.name: qsTr("Fancy link") 43 | Accessible.description: qsTr("A stylized link") 44 | } 45 | -------------------------------------------------------------------------------- /gpt4all-chat/qml/MyFileDialog.qml: -------------------------------------------------------------------------------- 1 | import QtCore 2 | import QtQuick 3 | import QtQuick.Dialogs 4 | 5 | FileDialog { 6 | id: fileDialog 7 | title: qsTr("Please choose a file") 8 | property var acceptedConnection: null 9 | 10 | function openFileDialog(currentFolder, onAccepted) { 11 | fileDialog.currentFolder = currentFolder; 12 | if (acceptedConnection !== null) { 13 | fileDialog.accepted.disconnect(acceptedConnection); 14 | } 15 | acceptedConnection = function() { onAccepted(fileDialog.selectedFile); }; 16 | fileDialog.accepted.connect(acceptedConnection); 17 | fileDialog.open(); 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /gpt4all-chat/qml/MyFileIcon.qml: -------------------------------------------------------------------------------- 1 | import QtCore 2 | import QtQuick 3 | import QtQuick.Controls 4 | import QtQuick.Controls.Basic 5 | import Qt5Compat.GraphicalEffects 6 | 7 | Item { 8 | id: fileIcon 9 | property real iconSize: 24 10 | property string fileName: "" 11 | implicitWidth: iconSize 12 | implicitHeight: iconSize 13 | 14 | Image { 15 | id: fileImage 16 | anchors.fill: parent 17 | visible: false 18 | sourceSize.width: iconSize 19 | sourceSize.height: iconSize 20 | mipmap: true 21 | source: { 22 | if (fileIcon.fileName.toLowerCase().endsWith(".txt")) 23 | return "qrc:/gpt4all/icons/file-txt.svg" 24 | else if (fileIcon.fileName.toLowerCase().endsWith(".pdf")) 25 | return "qrc:/gpt4all/icons/file-pdf.svg" 26 | else if (fileIcon.fileName.toLowerCase().endsWith(".md")) 27 | return "qrc:/gpt4all/icons/file-md.svg" 28 | else if (fileIcon.fileName.toLowerCase().endsWith(".xlsx")) 29 | return "qrc:/gpt4all/icons/file-xls.svg" 30 | else if 
(fileIcon.fileName.toLowerCase().endsWith(".docx")) 31 | return "qrc:/gpt4all/icons/file-docx.svg" 32 | else 33 | return "qrc:/gpt4all/icons/file.svg" 34 | } 35 | } 36 | ColorOverlay { 37 | anchors.fill: fileImage 38 | source: fileImage 39 | color: theme.textColor 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /gpt4all-chat/qml/MyFolderDialog.qml: -------------------------------------------------------------------------------- 1 | import QtCore 2 | import QtQuick 3 | import QtQuick.Dialogs 4 | 5 | FolderDialog { 6 | id: folderDialog 7 | title: qsTr("Please choose a directory") 8 | property var acceptedConnection: null 9 | 10 | function openFolderDialog(currentFolder, onAccepted) { 11 | folderDialog.currentFolder = currentFolder; 12 | // disconnect the previous handler so repeated calls don't stack stale callbacks 13 | if (acceptedConnection !== null) { 14 | folderDialog.accepted.disconnect(acceptedConnection); 15 | } 16 | acceptedConnection = function() { onAccepted(folderDialog.selectedFolder); }; 17 | folderDialog.accepted.connect(acceptedConnection); 18 | folderDialog.open(); 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /gpt4all-chat/qml/MyMenuItem.qml: -------------------------------------------------------------------------------- 1 | import Qt5Compat.GraphicalEffects 2 | import QtCore 3 | import QtQuick 4 | import QtQuick.Controls 5 | import QtQuick.Controls.Basic 6 | import QtQuick.Layouts 7 | 8 | MenuItem { 9 | id: item 10 | background: Rectangle { 11 | radius: 10 12 | width: parent.width - 20 13 | color: item.highlighted ? theme.menuHighlightColor : theme.menuBackgroundColor 14 | } 15 | 16 | contentItem: RowLayout { 17 | spacing: 0 18 | Item { 19 | visible: item.icon.source.toString() !== "" 20 | Layout.leftMargin: 6 21 | Layout.preferredWidth: item.icon.width 22 | Layout.preferredHeight: item.icon.height 23 | Image { 24 | id: image 25 | anchors.centerIn: parent 26 | visible: false 27 | fillMode: Image.PreserveAspectFit 28 | mipmap: true 29 | sourceSize.width: item.icon.width 30 | sourceSize.height: item.icon.height 31 | source: item.icon.source 32 | } 33 | ColorOverlay { 34 | anchors.fill: image 35 | source: image 36 | color: theme.textColor 37 | } 38 | } 39 | Text { 40 | Layout.alignment: Qt.AlignLeft 41 | padding: 5 42 | text: item.text 43 | color: theme.textColor 44 | font.pixelSize: theme.fontSizeLarge 45 | } 46 | Rectangle { 47 | color: "transparent" 48 | Layout.fillWidth: true 49 | height: 1 50 | } 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /gpt4all-chat/qml/MyMiniButton.qml: -------------------------------------------------------------------------------- 1 | import QtCore 2 | import QtQuick 3 | import QtQuick.Controls 4 | import QtQuick.Controls.Basic 5 | import Qt5Compat.GraphicalEffects 6 | 7 | Button { 8 | id: myButton 9 | padding: 0 10 | property color backgroundColor: theme.iconBackgroundDark 11 | property color backgroundColorHovered: theme.iconBackgroundHovered 12 | property alias source: image.source 13 | property alias fillMode: image.fillMode 14 | implicitWidth: 30 15 | implicitHeight: 30 16 | contentItem: Text { 17 | text: myButton.text 18 | horizontalAlignment: Text.AlignHCenter 19 | color: myButton.enabled ?
theme.textColor : theme.mutedTextColor 20 | font.pixelSize: theme.fontSizeLarge 21 | Accessible.role: Accessible.Button 22 | Accessible.name: text 23 | } 24 | 25 | background: Item { 26 | anchors.fill: parent 27 | Rectangle { 28 | anchors.fill: parent 29 | color: "transparent" 30 | } 31 | Image { 32 | id: image 33 | anchors.centerIn: parent 34 | visible: false 35 | mipmap: true 36 | sourceSize.width: 16 37 | sourceSize.height: 16 38 | } 39 | ColorOverlay { 40 | anchors.fill: image 41 | source: image 42 | color: myButton.hovered ? backgroundColorHovered : backgroundColor 43 | } 44 | } 45 | Accessible.role: Accessible.Button 46 | Accessible.name: text 47 | ToolTip.delay: Qt.styleHints.mousePressAndHoldInterval 48 | } 49 | -------------------------------------------------------------------------------- /gpt4all-chat/qml/MySettingsButton.qml: -------------------------------------------------------------------------------- 1 | import QtCore 2 | import QtQuick 3 | import QtQuick.Controls 4 | import QtQuick.Controls.Basic 5 | import mysettings 6 | 7 | Button { 8 | id: myButton 9 | padding: 10 10 | rightPadding: 18 11 | leftPadding: 18 12 | property color textColor: theme.lightButtonText 13 | property color mutedTextColor: theme.lightButtonMutedText 14 | property color backgroundColor: theme.lightButtonBackground 15 | property color backgroundColorHovered: enabled ? theme.lightButtonBackgroundHovered : backgroundColor 16 | property real borderWidth: 0 17 | property color borderColor: "transparent" 18 | property real fontPixelSize: theme.fontSizeLarge 19 | property string toolTip 20 | property alias backgroundRadius: background.radius 21 | 22 | contentItem: Text { 23 | text: myButton.text 24 | horizontalAlignment: Text.AlignHCenter 25 | color: myButton.enabled ? textColor : mutedTextColor 26 | font.pixelSize: fontPixelSize 27 | font.bold: true 28 | Accessible.role: Accessible.Button 29 | Accessible.name: text 30 | } 31 | background: Rectangle { 32 | id: background 33 | radius: 10 34 | border.width: borderWidth 35 | border.color: borderColor 36 | color: myButton.hovered ? backgroundColorHovered : backgroundColor 37 | } 38 | Accessible.role: Accessible.Button 39 | Accessible.name: text 40 | ToolTip.text: toolTip 41 | ToolTip.visible: toolTip !== "" && myButton.hovered 42 | ToolTip.delay: Qt.styleHints.mousePressAndHoldInterval 43 | } 44 | -------------------------------------------------------------------------------- /gpt4all-chat/qml/MySettingsDestructiveButton.qml: -------------------------------------------------------------------------------- 1 | import QtCore 2 | import QtQuick 3 | import QtQuick.Controls 4 | import QtQuick.Controls.Basic 5 | import mysettings 6 | 7 | Button { 8 | id: myButton 9 | padding: 10 10 | rightPadding: 18 11 | leftPadding: 18 12 | font.pixelSize: theme.fontSizeLarge 13 | property color textColor: theme.darkButtonText 14 | property color mutedTextColor: theme.darkButtonMutedText 15 | property color backgroundColor: theme.darkButtonBackground 16 | property color backgroundColorHovered: enabled ? theme.darkButtonBackgroundHovered : backgroundColor 17 | property real borderWidth: 0 18 | property color borderColor: "transparent" 19 | 20 | contentItem: Text { 21 | text: myButton.text 22 | horizontalAlignment: Text.AlignHCenter 23 | color: myButton.enabled ? 
textColor : mutedTextColor 24 | font.pixelSize: theme.fontSizeLarge 25 | font.bold: true 26 | Accessible.role: Accessible.Button 27 | Accessible.name: text 28 | } 29 | background: Rectangle { 30 | radius: 10 31 | border.width: borderWidth 32 | border.color: borderColor 33 | color: myButton.hovered ? backgroundColorHovered : backgroundColor 34 | } 35 | Accessible.role: Accessible.Button 36 | Accessible.name: text 37 | ToolTip.delay: Qt.styleHints.mousePressAndHoldInterval 38 | } 39 | -------------------------------------------------------------------------------- /gpt4all-chat/qml/MySlug.qml: -------------------------------------------------------------------------------- 1 | import QtCore 2 | import QtQuick 3 | import QtQuick.Controls 4 | import QtQuick.Controls.Basic 5 | 6 | Label { 7 | id: mySlug 8 | padding: 3 9 | rightPadding: 9 10 | leftPadding: 9 11 | font.pixelSize: theme.fontSizeSmall 12 | background: Rectangle { 13 | radius: 6 14 | border.width: 1 15 | border.color: mySlug.color 16 | color: theme.slugBackground 17 | } 18 | ToolTip.visible: ma.containsMouse && ToolTip.text !== "" 19 | MouseArea { 20 | id: ma 21 | anchors.fill: parent 22 | hoverEnabled: true 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /gpt4all-chat/qml/MyTabButton.qml: -------------------------------------------------------------------------------- 1 | import QtCore 2 | import QtQuick 3 | import QtQuick.Controls 4 | import QtQuick.Controls.Basic 5 | import mysettings 6 | import mysettingsenums 7 | 8 | MySettingsButton { 9 | property bool isSelected: false 10 | contentItem: Text { 11 | text: parent.text 12 | horizontalAlignment: Qt.AlignCenter 13 | color: isSelected ? theme.titleTextColor : theme.styledTextColor 14 | font.pixelSize: theme.fontSizeLarger 15 | } 16 | background: Item { 17 | visible: isSelected || hovered 18 | Rectangle { 19 | anchors.bottom: parent.bottom 20 | anchors.left: parent.left 21 | anchors.right: parent.right 22 | height: 3 23 | color: isSelected ? theme.titleTextColor : theme.styledTextColorLighter 24 | } 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /gpt4all-chat/qml/MyTextArea.qml: -------------------------------------------------------------------------------- 1 | import QtCore 2 | import QtQuick 3 | import QtQuick.Controls 4 | import QtQuick.Controls.Basic 5 | 6 | TextArea { 7 | id: myTextArea 8 | 9 | property string errState: "ok" // one of "ok", "error", "warning" 10 | 11 | color: enabled ? theme.textColor : theme.mutedTextColor 12 | placeholderTextColor: theme.mutedTextColor 13 | font.pixelSize: theme.fontSizeLarge 14 | background: Rectangle { 15 | implicitWidth: 150 16 | color: theme.controlBackground 17 | border.width: errState === "ok" ? 
1 : 2 18 | border.color: { 19 | switch (errState) { 20 | case "ok": return theme.controlBorder; 21 | case "warning": return theme.textWarningColor; 22 | case "error": return theme.textErrorColor; 23 | } 24 | } 25 | radius: 10 26 | } 27 | padding: 10 28 | wrapMode: TextArea.Wrap 29 | 30 | ToolTip.delay: Qt.styleHints.mousePressAndHoldInterval 31 | } 32 | -------------------------------------------------------------------------------- /gpt4all-chat/qml/MyTextButton.qml: -------------------------------------------------------------------------------- 1 | import QtQuick 2 | import QtQuick.Controls 3 | 4 | Text { 5 | id: text 6 | 7 | signal click() 8 | property string tooltip 9 | 10 | HoverHandler { id: hoverHandler } 11 | TapHandler { onTapped: { click() } } 12 | 13 | font.bold: true 14 | font.underline: hoverHandler.hovered 15 | font.pixelSize: theme.fontSizeSmall 16 | ToolTip.text: tooltip 17 | ToolTip.visible: tooltip !== "" && hoverHandler.hovered 18 | ToolTip.delay: Qt.styleHints.mousePressAndHoldInterval 19 | } 20 | -------------------------------------------------------------------------------- /gpt4all-chat/qml/MyTextField.qml: -------------------------------------------------------------------------------- 1 | import QtCore 2 | import QtQuick 3 | import QtQuick.Controls 4 | import QtQuick.Controls.Basic 5 | 6 | TextField { 7 | id: myTextField 8 | padding: 10 9 | placeholderTextColor: theme.mutedTextColor 10 | background: Rectangle { 11 | implicitWidth: 150 12 | color: myTextField.enabled ? theme.controlBackground : theme.disabledControlBackground 13 | border.width: 1 14 | border.color: theme.controlBorder 15 | radius: 10 16 | } 17 | ToolTip.delay: Qt.styleHints.mousePressAndHoldInterval 18 | color: enabled ? theme.textColor : theme.mutedTextColor 19 | } 20 | -------------------------------------------------------------------------------- /gpt4all-chat/qml/MyToolButton.qml: -------------------------------------------------------------------------------- 1 | import QtCore 2 | import QtQuick 3 | import QtQuick.Controls 4 | import QtQuick.Controls.Basic 5 | import Qt5Compat.GraphicalEffects 6 | 7 | Button { 8 | id: myButton 9 | padding: 10 10 | property color backgroundColor: theme.iconBackgroundDark 11 | property color backgroundColorHovered: theme.iconBackgroundHovered 12 | property color toggledColor: theme.accentColor 13 | property real toggledWidth: 1 14 | property bool toggled: false 15 | property alias source: image.source 16 | property alias fillMode: image.fillMode 17 | property alias imageWidth: image.sourceSize.width 18 | property alias imageHeight: image.sourceSize.height 19 | property alias bgTransform: background.transform 20 | contentItem: Text { 21 | text: myButton.text 22 | horizontalAlignment: Text.AlignHCenter 23 | color: myButton.enabled ? theme.textColor : theme.mutedTextColor 24 | font.pixelSize: theme.fontSizeLarge 25 | Accessible.role: Accessible.Button 26 | Accessible.name: text 27 | } 28 | 29 | background: Item { 30 | id: background 31 | anchors.fill: parent 32 | Rectangle { 33 | anchors.fill: parent 34 | color: myButton.toggledColor 35 | visible: myButton.toggled 36 | border.color: myButton.toggledColor 37 | border.width: myButton.toggledWidth 38 | radius: 8 39 | } 40 | Image { 41 | id: image 42 | anchors.centerIn: parent 43 | visible: false 44 | fillMode: Image.PreserveAspectFit 45 | mipmap: true 46 | sourceSize.width: 32 47 | sourceSize.height: 32 48 | } 49 | ColorOverlay { 50 | anchors.fill: image 51 | source: image 52 | color: !myButton.enabled ? 
theme.mutedTextColor : myButton.hovered ? backgroundColorHovered : backgroundColor 53 | } 54 | } 55 | Accessible.role: Accessible.Button 56 | Accessible.name: text 57 | ToolTip.delay: Qt.styleHints.mousePressAndHoldInterval 58 | } 59 | -------------------------------------------------------------------------------- /gpt4all-chat/qml/NewVersionDialog.qml: -------------------------------------------------------------------------------- 1 | import QtCore 2 | import QtQuick 3 | import QtQuick.Controls 4 | import QtQuick.Controls.Basic 5 | import QtQuick.Layouts 6 | import download 7 | import network 8 | import llm 9 | 10 | MyDialog { 11 | id: newVersionDialog 12 | anchors.centerIn: parent 13 | modal: true 14 | width: contentItem.width 15 | height: contentItem.height 16 | padding: 20 17 | closeButtonVisible: false 18 | 19 | Theme { 20 | id: theme 21 | } 22 | 23 | Item { 24 | id: contentItem 25 | width: childrenRect.width + 40 26 | height: childrenRect.height + 40 27 | 28 | Label { 29 | id: label 30 | anchors.top: parent.top 31 | anchors.left: parent.left 32 | topPadding: 20 33 | bottomPadding: 20 34 | text: qsTr("New version is available") 35 | color: theme.titleTextColor 36 | font.pixelSize: theme.fontSizeLarge 37 | font.bold: true 38 | } 39 | 40 | MySettingsButton { 41 | id: button 42 | anchors.left: label.right 43 | anchors.leftMargin: 10 44 | anchors.verticalCenter: label.verticalCenter 45 | padding: 20 46 | text: qsTr("Update") 47 | font.pixelSize: theme.fontSizeLarge 48 | Accessible.description: qsTr("Update to new version") 49 | onClicked: { 50 | if (!LLM.checkForUpdates()) 51 | checkForUpdatesError.open() 52 | } 53 | } 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /gpt4all-chat/qml/ToastManager.qml: -------------------------------------------------------------------------------- 1 | /* 2 | * SPDX-License-Identifier: MIT 3 | * Source: https://gist.github.com/jonmcclung/bae669101d17b103e94790341301c129 4 | * Adapted from StackOverflow: http://stackoverflow.com/questions/26879266/make-toast-in-android-by-qml 5 | */ 6 | 7 | import QtQuick 2.0 8 | 9 | /** 10 | * @brief Manager that creates Toasts dynamically 11 | */ 12 | ListView { 13 | /** 14 | * Public 15 | */ 16 | 17 | /** 18 | * @brief Shows a Toast 19 | * 20 | * @param {string} text Text to show 21 | * @param {real} duration Duration to show in milliseconds, defaults to 3000 22 | */ 23 | function show(text, duration=3000) { 24 | model.insert(0, {text: text, duration: duration}); 25 | } 26 | 27 | /** 28 | * Private 29 | */ 30 | 31 | id: root 32 | 33 | z: Infinity 34 | spacing: 5 35 | anchors.fill: parent 36 | anchors.bottomMargin: 10 37 | verticalLayoutDirection: ListView.BottomToTop 38 | 39 | interactive: false 40 | 41 | displaced: Transition { 42 | NumberAnimation { 43 | properties: "y" 44 | easing.type: Easing.InOutQuad 45 | } 46 | } 47 | 48 | delegate: Toast { 49 | Component.onCompleted: { 50 | if (typeof duration === "undefined") { 51 | show(text); 52 | } 53 | else { 54 | show(text, duration); 55 | } 56 | } 57 | } 58 | 59 | model: ListModel {id: model} 60 | } 61 | -------------------------------------------------------------------------------- /gpt4all-chat/resources/gpt4all.icns: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-chat/resources/gpt4all.icns --------------------------------------------------------------------------------
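A hypothetical usage sketch for the `ToastManager` component above — this is not a file from the repository; it assumes a parent view that instantiates the manager and calls the `show()` function declared in ToastManager.qml:

```qml
import QtQuick
import QtQuick.Controls

// Hypothetical host item: ToastManager fills its parent and stacks toasts bottom-to-top.
Item {
    ToastManager { id: toasts }

    Button {
        text: qsTr("Copy")
        // show(text, duration): duration is in milliseconds and defaults to 3000
        onClicked: toasts.show(qsTr("Copied to clipboard"), 2000)
    }
}
```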
/gpt4all-chat/resources/gpt4all.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-chat/resources/gpt4all.ico -------------------------------------------------------------------------------- /gpt4all-chat/resources/gpt4all.rc: -------------------------------------------------------------------------------- 1 | IDI_ICON1 ICON "gpt4all.ico" 2 | -------------------------------------------------------------------------------- /gpt4all-chat/src/config.h.in: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #define APP_VERSION "@APP_VERSION@" 4 | 5 | #define G4A_CONFIG(name) (1/G4A_CONFIG_##name == 1) 6 | 7 | #define G4A_CONFIG_force_d3d12 @GPT4ALL_CONFIG_FORCE_D3D12@ 8 | -------------------------------------------------------------------------------- /gpt4all-chat/src/jinja_helpers.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "chatmodel.h" 4 | #include "database.h" 5 | 6 | #include 7 | 8 | #include <nlohmann/json.hpp> // IWYU pragma: keep 9 | 10 | // IWYU pragma: no_forward_declare MessageItem 11 | // IWYU pragma: no_forward_declare PromptAttachment 12 | // IWYU pragma: no_forward_declare ResultInfo 13 | 14 | using json = nlohmann::ordered_json; 15 | 16 | 17 | template <typename Derived> 18 | class JinjaHelper { 19 | public: 20 | json::object_t AsJson() const { return static_cast<const Derived *>(this)->AsJson(); } 21 | }; 22 | 23 | class JinjaResultInfo : public JinjaHelper<JinjaResultInfo> { 24 | public: 25 | explicit JinjaResultInfo(const ResultInfo &source) noexcept 26 | : m_source(&source) {} 27 | 28 | json::object_t AsJson() const; 29 | 30 | private: 31 | const ResultInfo *m_source; 32 | }; 33 | 34 | class JinjaPromptAttachment : public JinjaHelper<JinjaPromptAttachment> { 35 | public: 36 | explicit JinjaPromptAttachment(const PromptAttachment &attachment) noexcept 37 | : m_attachment(&attachment) {} 38 | 39 | json::object_t AsJson() const; 40 | 41 | private: 42 | const PromptAttachment *m_attachment; 43 | }; 44 | 45 | class JinjaMessage : public JinjaHelper<JinjaMessage> { 46 | public: 47 | explicit JinjaMessage(uint version, const MessageItem &item) noexcept 48 | : m_version(version), m_item(&item) {} 49 | 50 | json::object_t AsJson() const; 51 | 52 | private: 53 | uint m_version; 54 | const MessageItem *m_item; 55 | }; 56 | -------------------------------------------------------------------------------- /gpt4all-chat/src/jinja_replacements.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include <string_view> 4 | #include <unordered_map> 5 | 6 | extern const std::unordered_map<std::string_view, std::string_view> CHAT_TEMPLATE_SUBSTITUTIONS; 7 | -------------------------------------------------------------------------------- /gpt4all-chat/src/llm.h: -------------------------------------------------------------------------------- 1 | #ifndef LLM_H 2 | #define LLM_H 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | 9 | class LLM : public QObject 10 | { 11 | Q_OBJECT 12 | Q_PROPERTY(bool isNetworkOnline READ isNetworkOnline NOTIFY isNetworkOnlineChanged) 13 | 14 | public: 15 | static LLM *globalInstance(); 16 | 17 | Q_INVOKABLE bool hasSettingsAccess() const; 18 | Q_INVOKABLE bool compatHardware() const { return m_compatHardware; } 19 | 20 | Q_INVOKABLE bool checkForUpdates() const; 21 | Q_INVOKABLE static bool directoryExists(const QString &path); 22 | Q_INVOKABLE static bool fileExists(const QString &path); 23 | Q_INVOKABLE qint64
systemTotalRAMInGB() const; 24 | Q_INVOKABLE QString systemTotalRAMInGBString() const; 25 | Q_INVOKABLE bool isNetworkOnline() const; 26 | 27 | Q_INVOKABLE void showDockIcon() const; 28 | Q_INVOKABLE void hideDockIcon() const; 29 | 30 | Q_SIGNALS: 31 | void isNetworkOnlineChanged(); 32 | 33 | private: 34 | bool m_compatHardware; 35 | 36 | private: 37 | explicit LLM(); 38 | ~LLM() {} 39 | friend class MyLLM; 40 | }; 41 | 42 | #endif // LLM_H 43 | -------------------------------------------------------------------------------- /gpt4all-chat/src/localdocs.h: -------------------------------------------------------------------------------- 1 | #ifndef LOCALDOCS_H 2 | #define LOCALDOCS_H 3 | 4 | #include "database.h" 5 | #include "localdocsmodel.h" 6 | 7 | #include 8 | #include 9 | #include // IWYU pragma: keep 10 | 11 | // IWYU pragma: no_forward_declare LocalDocsModel 12 | 13 | 14 | class LocalDocs : public QObject 15 | { 16 | Q_OBJECT 17 | Q_PROPERTY(bool databaseValid READ databaseValid NOTIFY databaseValidChanged) 18 | Q_PROPERTY(LocalDocsModel *localDocsModel READ localDocsModel NOTIFY localDocsModelChanged) 19 | 20 | public: 21 | static LocalDocs *globalInstance(); 22 | 23 | LocalDocsModel *localDocsModel() const { return m_localDocsModel; } 24 | 25 | Q_INVOKABLE void addFolder(const QString &collection, const QString &path); 26 | Q_INVOKABLE void removeFolder(const QString &collection, const QString &path); 27 | Q_INVOKABLE void forceIndexing(const QString &collection); 28 | 29 | Database *database() const { return m_database; } 30 | 31 | bool databaseValid() const { return m_database->isValid(); } 32 | 33 | public Q_SLOTS: 34 | void handleChunkSizeChanged(); 35 | void handleFileExtensionsChanged(); 36 | void aboutToQuit(); 37 | 38 | Q_SIGNALS: 39 | void requestStart(); 40 | void requestForceIndexing(const QString &collection, const QString &embedding_model); 41 | void forceRebuildFolder(const QString &path); 42 | void requestAddFolder(const QString &collection, const QString &path, const QString &embedding_model); 43 | void requestRemoveFolder(const QString &collection, const QString &path); 44 | void requestChunkSizeChange(int chunkSize); 45 | void requestFileExtensionsChange(const QStringList &extensions); 46 | void localDocsModelChanged(); 47 | void databaseValidChanged(); 48 | 49 | private: 50 | LocalDocsModel *m_localDocsModel; 51 | Database *m_database; 52 | 53 | private: 54 | explicit LocalDocs(); 55 | friend class MyLocalDocs; 56 | }; 57 | 58 | #endif // LOCALDOCS_H 59 | -------------------------------------------------------------------------------- /gpt4all-chat/src/logger.h: -------------------------------------------------------------------------------- 1 | #ifndef LOGGER_H 2 | #define LOGGER_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | 10 | class Logger { 11 | public: 12 | explicit Logger(); 13 | 14 | static Logger *globalInstance(); 15 | 16 | private: 17 | static void messageHandler(QtMsgType type, const QMessageLogContext &context, const QString &msg); 18 | 19 | private: 20 | QFile m_file; 21 | QMutex m_mutex; 22 | 23 | friend class MyLogger; 24 | }; 25 | 26 | #endif // LOGGER_H 27 | -------------------------------------------------------------------------------- /gpt4all-chat/src/macosdock.h: -------------------------------------------------------------------------------- 1 | #ifndef MACOSDOCK_H 2 | #define MACOSDOCK_H 3 | 4 | struct MacOSDock { 5 | static void showIcon(); 6 | static void hideIcon(); 7 | }; 8 | 9 | #endif // MACOSDOCK_H 10 | 
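11 | // A hypothetical usage sketch (an annotation, not upstream code): these helpers
12 | // presumably back the Q_INVOKABLE LLM::showDockIcon()/hideDockIcon() wrappers
13 | // declared in llm.h above, so QML can toggle the macOS dock icon, e.g.:
14 | //
15 | //     import llm
16 | //     Item {
17 | //         Component.onCompleted: LLM.hideDockIcon()
18 | //         Component.onDestruction: LLM.showDockIcon()
19 | //     }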
-------------------------------------------------------------------------------- /gpt4all-chat/src/macosdock.mm: -------------------------------------------------------------------------------- 1 | #include "macosdock.h" 2 | 3 | #include <AppKit/AppKit.h> 4 | 5 | 6 | void MacOSDock::showIcon() 7 | { 8 | [[NSApplication sharedApplication] setActivationPolicy:NSApplicationActivationPolicyRegular]; 9 | } 10 | 11 | void MacOSDock::hideIcon() 12 | { 13 | [[NSApplication sharedApplication] setActivationPolicy:NSApplicationActivationPolicyProhibited]; 14 | } 15 | -------------------------------------------------------------------------------- /gpt4all-chat/src/server.h: -------------------------------------------------------------------------------- 1 | #ifndef SERVER_H 2 | #define SERVER_H 3 | 4 | #include "chatllm.h" 5 | #include "database.h" 6 | 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include // IWYU pragma: keep 12 | #include 13 | 14 | #include 15 | #include 16 | #include 17 | 18 | class Chat; 19 | class ChatRequest; 20 | class CompletionRequest; 21 | 22 | 23 | class Server : public ChatLLM 24 | { 25 | Q_OBJECT 26 | 27 | public: 28 | explicit Server(Chat *chat); 29 | ~Server() override = default; 30 | 31 | public Q_SLOTS: 32 | void start(); 33 | 34 | Q_SIGNALS: 35 | void requestResetResponseState(); 36 | 37 | private: 38 | auto handleCompletionRequest(const CompletionRequest &request) -> std::pair>; 39 | auto handleChatRequest(const ChatRequest &request) -> std::pair>; 40 | 41 | private Q_SLOTS: 42 | void handleDatabaseResultsChanged(const QList<ResultInfo> &results) { m_databaseResults = results; } 43 | void handleCollectionListChanged(const QList<QString> &collectionList) { m_collections = collectionList; } 44 | 45 | private: 46 | Chat *m_chat; 47 | std::unique_ptr<QHttpServer> m_server; 48 | QList<ResultInfo> m_databaseResults; 49 | QList<QString> m_collections; 50 | }; 51 | 52 | #endif // SERVER_H 53 | -------------------------------------------------------------------------------- /gpt4all-chat/src/toolmodel.cpp: -------------------------------------------------------------------------------- 1 | #include "toolmodel.h" 2 | 3 | #include "codeinterpreter.h" 4 | 5 | #include 6 | #include 7 | #include 8 | 9 | 10 | class MyToolModel: public ToolModel { }; 11 | Q_GLOBAL_STATIC(MyToolModel, toolModelInstance) 12 | ToolModel *ToolModel::globalInstance() 13 | { 14 | return toolModelInstance(); 15 | } 16 | 17 | ToolModel::ToolModel() 18 | : QAbstractListModel(nullptr) 19 | { 20 | QCoreApplication::instance()->installEventFilter(this); 21 | 22 | Tool* codeInterpreter = new CodeInterpreter; 23 | m_tools.append(codeInterpreter); 24 | m_toolMap.insert(codeInterpreter->function(), codeInterpreter); 25 | } 26 | 27 | bool ToolModel::eventFilter(QObject *obj, QEvent *ev) 28 | { 29 | if (obj == QCoreApplication::instance() && ev->type() == QEvent::LanguageChange) 30 | emit dataChanged(index(0, 0), index(m_tools.size() - 1, 0)); 31 | return false; 32 | } 33 | -------------------------------------------------------------------------------- /gpt4all-chat/src/utils.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | 6 | #include 7 | #include 8 | #include // IWYU pragma: keep 9 | #include 10 | #include 11 | #include 12 | #include 13 | 14 | #include 15 | #include 16 | #include // IWYU pragma: keep 17 | 18 | // IWYU pragma: no_forward_declare QJsonValue 19 | class QJsonObject; 20 | 21 | 22 | // fmtlib formatters for QString and QVariant 23 | 24 | #define MAKE_FORMATTER(type,
conversion) \ 25 | template <> \ 26 | struct fmt::formatter<type, char>: fmt::formatter<std::string_view, char> { \ 27 | template <typename FmtContext> \ 28 | FmtContext::iterator format(const type &value, FmtContext &ctx) const \ 29 | { \ 30 | auto valueUtf8 = (conversion); \ 31 | std::string_view view(valueUtf8.cbegin(), valueUtf8.cend()); \ 32 | return formatter<std::string_view, char>::format(view, ctx); \ 33 | } \ 34 | } 35 | 36 | MAKE_FORMATTER(QUtf8StringView, value ); 37 | MAKE_FORMATTER(QStringView, value.toUtf8() ); 38 | MAKE_FORMATTER(QString, value.toUtf8() ); 39 | MAKE_FORMATTER(QVariant, value.toString().toUtf8()); 40 | 41 | // alternative to QJsonObject's initializer_list constructor that accepts Latin-1 strings 42 | QJsonObject makeJsonObject(std::initializer_list<std::pair<QLatin1StringView, QJsonValue>> args); 43 | 44 | #include "utils.inl" // IWYU pragma: export 45 | -------------------------------------------------------------------------------- /gpt4all-chat/src/utils.inl: -------------------------------------------------------------------------------- 1 | #include <QJsonObject> 2 | 3 | 4 | inline QJsonObject makeJsonObject(std::initializer_list<std::pair<QLatin1StringView, QJsonValue>> args) 5 | { 6 | QJsonObject obj; 7 | for (auto &arg : args) 8 | obj.insert(arg.first, arg.second); 9 | return obj; 10 | } 11 | -------------------------------------------------------------------------------- /gpt4all-chat/src/xlsxtomd.h: -------------------------------------------------------------------------------- 1 | #ifndef XLSXTOMD_H 2 | #define XLSXTOMD_H 3 | 4 | class QIODevice; 5 | class QString; 6 | 7 | 8 | class XLSXToMD 9 | { 10 | public: 11 | static QString toMarkdown(QIODevice *xlsxDevice); 12 | }; 13 | 14 | #endif // XLSXTOMD_H 15 | -------------------------------------------------------------------------------- /gpt4all-chat/system_requirements.md: -------------------------------------------------------------------------------- 1 | Below are the recommended and minimum system requirements for GPT4All. 2 | 3 | ### **Recommended System Requirements** 4 | | **Component** | **PC (Windows/Linux)** | **Apple** | 5 | |---------------|-------------------------------------------------------|----------------------------| 6 | | **CPU** | Ryzen 5 3600 or Intel Core i7-10700, or better | M2 Pro | 7 | | **RAM** | 16GB | 16GB | 8 | | **GPU** | NVIDIA GTX 1080 Ti/RTX 2080 or better, with 8GB+ VRAM | M2 Pro (integrated GPU) | 9 | | **OS** | At least Windows 10 or Ubuntu 24.04 LTS | macOS Sonoma 14.5 or newer | 10 | 11 | ### **Minimum System Requirements** 12 | | **Component** | **PC (Windows/Linux)** | **Apple** | 13 | |---------------|-----------------------------------------------------------------|---------------------| 14 | | **CPU** | Intel Core: i3-2100, Pentium: 7505, Celeron: 6305; AMD: FX-4100 | M1 | 15 | | **RAM** | 16GB (8GB for 3B LLMs) | 16GB | 16 | | **GPU** | Anything Direct3D 11/12 or OpenGL 2.1 capable | M1 (integrated GPU) | 17 | | **OS** | Windows 10, Ubuntu 22.04 LTS, or other compatible Linux | macOS Monterey 12.6 | 18 | 19 | Note that Windows and Linux PCs with ARM CPUs are not currently supported.
20 | -------------------------------------------------------------------------------- /gpt4all-chat/test-requirements.txt: -------------------------------------------------------------------------------- 1 | pytest~=8.3 2 | requests~=2.32 3 | -------------------------------------------------------------------------------- /gpt4all-chat/tests/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | include(FetchContent) 2 | 3 | find_package(Python3 3.12 REQUIRED COMPONENTS Interpreter) 4 | 5 | # Google test download and setup 6 | FetchContent_Declare( 7 | googletest 8 | URL https://github.com/google/googletest/archive/refs/tags/v1.15.2.zip 9 | ) 10 | FetchContent_MakeAvailable(googletest) 11 | 12 | configure_file(python/config.py.in "${CMAKE_CURRENT_SOURCE_DIR}/python/config.py") 13 | 14 | add_test(NAME ChatPythonTests 15 | COMMAND ${Python3_EXECUTABLE} -m pytest --color=yes "${CMAKE_CURRENT_SOURCE_DIR}/python" 16 | ) 17 | set_tests_properties(ChatPythonTests PROPERTIES 18 | ENVIRONMENT "CHAT_EXECUTABLE=${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/chat;TEST_MODEL_PATH=${TEST_MODEL_PATH}" 19 | TIMEOUT 60 20 | ) 21 | 22 | add_executable(gpt4all_tests 23 | cpp/test_main.cpp 24 | cpp/basic_test.cpp 25 | ) 26 | 27 | target_link_libraries(gpt4all_tests PRIVATE gtest gtest_main) 28 | 29 | include(GoogleTest) 30 | gtest_discover_tests(gpt4all_tests) 31 | -------------------------------------------------------------------------------- /gpt4all-chat/tests/cpp/basic_test.cpp: -------------------------------------------------------------------------------- 1 | #include <gtest/gtest.h> 2 | 3 | TEST(BasicTest, TestInitialization) { 4 | EXPECT_TRUE(true); 5 | } 6 | -------------------------------------------------------------------------------- /gpt4all-chat/tests/cpp/test_main.cpp: -------------------------------------------------------------------------------- 1 | #include <gtest/gtest.h> 2 | 3 | int main(int argc, char **argv) { 4 | ::testing::InitGoogleTest(&argc, argv); 5 | return RUN_ALL_TESTS(); 6 | } 7 | -------------------------------------------------------------------------------- /gpt4all-chat/tests/python/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-chat/tests/python/__init__.py -------------------------------------------------------------------------------- /gpt4all-chat/tests/python/config.py.in: -------------------------------------------------------------------------------- 1 | APP_VERSION = '@APP_VERSION@' 2 | -------------------------------------------------------------------------------- /gpt4all-lora-demo.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-lora-demo.gif -------------------------------------------------------------------------------- /gpt4all-training/GPT-J_MAP.md: -------------------------------------------------------------------------------- 1 | # Inference on Training Data 2 | 3 | 4 | ## Run Inference 5 | 6 | ```bash 7 | torchrun --master_port=29085 --nproc-per-node 8 inference.py --config=configs/inference/gptj.yaml 8 | ``` 9 | 10 | 11 | ## Visualizations 12 | 13 | ```bash 14 | python build_map.py 15 | ``` 16 | 17 | will build two maps in `Atlas`: one using the internal clustering algorithm provided by Nomic and one using the embeddings generated by the finetuned model.
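18 | 19 | Both maps are uploaded through the `nomic` Python client (see `build_map.py` below, which reads the inference outputs from `inference/*.jsonl`), so you must be authenticated with Atlas first — a minimal sketch, assuming the standard `nomic` CLI: 20 | 21 | ```bash 22 | pip install nomic 23 | nomic login # authenticate this machine with your Atlas account 24 | ```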
-------------------------------------------------------------------------------- /gpt4all-training/build_map.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import numpy as np 3 | from nomic import atlas 4 | import glob 5 | from tqdm import tqdm 6 | from datasets import load_dataset, concatenate_datasets 7 | from sklearn.decomposition import PCA 8 | 9 | files = glob.glob("inference/*.jsonl") 10 | print(files) 11 | df = concatenate_datasets([load_dataset("json", data_files=file, split="train") for file in tqdm(files)]) 12 | 13 | print(len(df)) 14 | print(df) 15 | 16 | df = df.map(lambda example: {"inputs": [prompt + "\n" + response for prompt, response in zip(example["prompt"], example["response"])]}, 17 | batched=True, 18 | num_proc=64) 19 | 20 | df = df.map(lambda example: {"trained_on": [int(t) for t in example["is_train"]]}, 21 | batched=True, 22 | num_proc=64) 23 | 24 | df = df.remove_columns("is_train") 25 | 26 | text = df.remove_columns(["labels", "input_ids", "embeddings"]) 27 | 28 | text_df = [text[i] for i in range(len(text))] 29 | 30 | atlas.map_text(text_df, indexed_field="inputs", 31 | name="CHANGE ME!", 32 | colorable_fields=["source", "loss", "trained_on"], 33 | reset_project_if_exists=True, 34 | ) 35 | 36 | # index is local to train/test split, regenerate 37 | data = df.remove_columns(["labels", "input_ids", "index"]) 38 | data = data.add_column("index", list(range(len(data)))) 39 | # max embed dim is 2048 for now 40 | # note! this is slow in pyarrow/hf datasets 41 | embeddings = np.array(data["embeddings"]) 42 | print("embeddings shape:", embeddings.shape) 43 | embeddings = PCA(n_components=2048).fit_transform(embeddings) 44 | 45 | data = data.remove_columns(["embeddings"]) 46 | columns = data.to_pandas().to_dict("records") 47 | 48 | atlas.map_embeddings(embeddings, 49 | data=columns, 50 | id_field="index", 51 | name="CHANGE ME!", 52 | colorable_fields=["source", "loss", "trained_on"], 53 | build_topic_model=True, 54 | topic_label_field="inputs", 55 | reset_project_if_exists=True,) 56 | -------------------------------------------------------------------------------- /gpt4all-training/configs/deepspeed/ds_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "train_batch_size": "auto", 3 | "gradient_accumulation_steps": "auto", 4 | "train_micro_batch_size_per_gpu": "auto", 5 | "fp16": { 6 | "enabled": "auto", 7 | "min_loss_scale": 1, 8 | "loss_scale_window": 1000, 9 | "hysteresis": 2, 10 | "initial_scale_power": 32 11 | }, 12 | "bf16": { 13 | "enabled": "auto" 14 | }, 15 | "gradient_clipping": 1, 16 | "zero_optimization": { 17 | "stage": 2, 18 | "offload_param": { 19 | "device": "none" 20 | }, 21 | "offload_optimizer": { 22 | "device": "none" 23 | }, 24 | "allgather_partitions": true, 25 | "allgather_bucket_size": 5e8, 26 | "contiguous_gradients": true 27 | }, 28 | "optimizer": { 29 | "type": "AdamW", 30 | "params": { 31 | "lr": "auto", 32 | "betas": [ 33 | 0.9, 34 | 0.999 35 | ], 36 | "eps": 1e-08 37 | } 38 | }, 39 | "scheduler": { 40 | "type": "WarmupLR", 41 | "params": { 42 | "warmup_min_lr": 0, 43 | "warmup_max_lr": "auto", 44 | "warmup_num_steps": "auto", 45 | "warmup_type": "linear" 46 | } 47 | } 48 | } -------------------------------------------------------------------------------- /gpt4all-training/configs/deepspeed/ds_config_gptj.json: -------------------------------------------------------------------------------- 1 | { 2 | "train_batch_size": 
"auto", 3 | "gradient_accumulation_steps": "auto", 4 | "train_micro_batch_size_per_gpu": "auto", 5 | "fp16": { 6 | "enabled": "auto", 7 | "min_loss_scale": 1, 8 | "loss_scale_window": 1000, 9 | "hysteresis": 2, 10 | "initial_scale_power": 32 11 | }, 12 | "bf16": { 13 | "enabled": "auto" 14 | }, 15 | "gradient_clipping": 1.0, 16 | "zero_optimization": { 17 | "stage": 2, 18 | "offload_param": { 19 | "device": "none" 20 | }, 21 | "offload_optimizer": { 22 | "device": "none" 23 | }, 24 | "allgather_partitions": true, 25 | "allgather_bucket_size": 5e8, 26 | "contiguous_gradients": true 27 | }, 28 | "optimizer": { 29 | "type": "AdamW", 30 | "params": { 31 | "lr": "auto", 32 | "betas": [ 33 | 0.9, 34 | 0.999 35 | ], 36 | "eps": 1e-08 37 | } 38 | }, 39 | "scheduler": { 40 | "type": "WarmupLR", 41 | "params": { 42 | "warmup_min_lr": 0, 43 | "warmup_max_lr": "auto", 44 | "warmup_num_steps": "auto", 45 | "warmup_type": "linear" 46 | } 47 | } 48 | } -------------------------------------------------------------------------------- /gpt4all-training/configs/deepspeed/ds_config_gptj_lora.json: -------------------------------------------------------------------------------- 1 | { 2 | "train_batch_size": "auto", 3 | "gradient_accumulation_steps": "auto", 4 | "train_micro_batch_size_per_gpu": "auto", 5 | "fp16": { 6 | "enabled": "auto", 7 | "min_loss_scale": 1, 8 | "loss_scale_window": 1000, 9 | "hysteresis": 2, 10 | "initial_scale_power": 32 11 | }, 12 | "bf16": { 13 | "enabled": "auto" 14 | }, 15 | "gradient_clipping": 1, 16 | "zero_optimization": { 17 | "stage": 2, 18 | "offload_param": { 19 | "device": "cpu" 20 | }, 21 | "offload_optimizer": { 22 | "device": "cpu" 23 | }, 24 | "allgather_partitions": true, 25 | "allgather_bucket_size": 5e8, 26 | "contiguous_gradients": true 27 | }, 28 | "optimizer": { 29 | "type": "AdamW", 30 | "params": { 31 | "lr": "auto", 32 | "betas": [ 33 | 0.9, 34 | 0.999 35 | ], 36 | "eps": 1e-08 37 | } 38 | }, 39 | "scheduler": { 40 | "type": "WarmupLR", 41 | "params": { 42 | "warmup_min_lr": 0, 43 | "warmup_max_lr": "auto", 44 | "warmup_num_steps": "auto", 45 | "warmup_type": "linear" 46 | } 47 | } 48 | } -------------------------------------------------------------------------------- /gpt4all-training/configs/deepspeed/ds_config_mpt.json: -------------------------------------------------------------------------------- 1 | { 2 | "train_batch_size": "auto", 3 | "gradient_accumulation_steps": "auto", 4 | "train_micro_batch_size_per_gpu": "auto", 5 | "fp16": { 6 | "enabled": "auto", 7 | "min_loss_scale": 1, 8 | "loss_scale_window": 1000, 9 | "hysteresis": 2, 10 | "initial_scale_power": 32 11 | }, 12 | "bf16": { 13 | "enabled": "auto" 14 | }, 15 | "gradient_clipping": 1.0, 16 | "zero_optimization": { 17 | "stage": 1, 18 | "offload_param": { 19 | "device": "none" 20 | }, 21 | "offload_optimizer": { 22 | "device": "none" 23 | }, 24 | "allgather_partitions": true, 25 | "allgather_bucket_size": 5e8, 26 | "contiguous_gradients": true 27 | }, 28 | "optimizer": { 29 | "type": "AdamW", 30 | "params": { 31 | "lr": "auto", 32 | "betas": [ 33 | 0.9, 34 | 0.999 35 | ], 36 | "eps": 1e-08 37 | } 38 | }, 39 | "scheduler": { 40 | "type": "WarmupDecayLR", 41 | "params": { 42 | "warmup_min_lr": 0, 43 | "warmup_max_lr": "auto", 44 | "warmup_num_steps": "auto", 45 | "warmup_type": "linear", 46 | "total_num_steps": "auto" 47 | } 48 | } 49 | } -------------------------------------------------------------------------------- /gpt4all-training/configs/deepspeed/ds_config_pythia.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "train_batch_size": "auto", 3 | "gradient_accumulation_steps": "auto", 4 | "train_micro_batch_size_per_gpu": "auto", 5 | "fp16": { 6 | "enabled": "auto", 7 | "min_loss_scale": 1, 8 | "loss_scale_window": 1000, 9 | "hysteresis": 2, 10 | "initial_scale_power": 32 11 | }, 12 | "bf16": { 13 | "enabled": "auto" 14 | }, 15 | "gradient_clipping": 1.0, 16 | "zero_optimization": { 17 | "stage": 2, 18 | "offload_param": { 19 | "device": "none" 20 | }, 21 | "offload_optimizer": { 22 | "device": "none" 23 | }, 24 | "allgather_partitions": true, 25 | "allgather_bucket_size": 5e8, 26 | "contiguous_gradients": true 27 | }, 28 | "optimizer": { 29 | "type": "AdamW", 30 | "params": { 31 | "lr": "auto", 32 | "betas": [ 33 | 0.9, 34 | 0.999 35 | ], 36 | "eps": 1e-08 37 | } 38 | }, 39 | "scheduler": { 40 | "type": "WarmupLR", 41 | "params": { 42 | "warmup_min_lr": 0, 43 | "warmup_max_lr": "auto", 44 | "warmup_num_steps": "auto", 45 | "warmup_type": "linear" 46 | } 47 | } 48 | } -------------------------------------------------------------------------------- /gpt4all-training/configs/eval/generate_baseline.yaml: -------------------------------------------------------------------------------- 1 | # model/tokenizer 2 | model_name: "zpn/llama-7b" 3 | tokenizer_name: "zpn/llama-7b" 4 | lora: true 5 | lora_path: "tloen/alpaca-lora-7b" -------------------------------------------------------------------------------- /gpt4all-training/configs/eval/generate_gpt4all_gptj.yaml: -------------------------------------------------------------------------------- 1 | # model/tokenizer 2 | model_name: "nomic-ai/gpt4all-warmup-lr-epoch_0" 3 | tokenizer_name: "EleutherAI/gpt-j-6b" 4 | lora: false 5 | -------------------------------------------------------------------------------- /gpt4all-training/configs/eval/generate_gpt4all_gptj_lora.yaml: -------------------------------------------------------------------------------- 1 | # model/tokenizer 2 | model_name: "EleutherAI/gpt-j-6b" 3 | tokenizer_name: "EleutherAI/gpt-j-6B" 4 | lora: true 5 | lora_path: "nomic-ai/gpt4all-gptj-lora-epoch_1" 6 | -------------------------------------------------------------------------------- /gpt4all-training/configs/eval/generate_gpt4all_llama_lora.yaml: -------------------------------------------------------------------------------- 1 | # model/tokenizer 2 | model_name: "zpn/llama-7b" 3 | tokenizer_name: "zpn/llama-7b" 4 | lora: true 5 | lora_path: "nomic-ai/gpt4all-lora" 6 | -------------------------------------------------------------------------------- /gpt4all-training/configs/generate/generate.yaml: -------------------------------------------------------------------------------- 1 | # model/tokenizer 2 | model_name: "zpn/llama-7b" 3 | tokenizer_name: "zpn/llama-7b" 4 | lora: true 5 | lora_path: "nomic-ai/gpt4all-lora" 6 | 7 | max_new_tokens: 512 8 | temperature: 0 9 | prompt: null 10 | -------------------------------------------------------------------------------- /gpt4all-training/configs/generate/generate_gptj.yaml: -------------------------------------------------------------------------------- 1 | # model/tokenizer 2 | model_name: "nomic-ai/gpt4all-warmup-lr-epoch_1" 3 | tokenizer_name: "EleutherAI/gpt-j-6b" 4 | lora: false 5 | 6 | 7 | max_new_tokens: 512 8 | temperature: 0.001 9 | prompt: | 10 | #this code prints a string reversed 11 | my_string = "hello how are you" 12 | print(len(my_string)) 13 | 14 | 15 | My code above does not work. 
Can you help me? 16 | -------------------------------------------------------------------------------- /gpt4all-training/configs/generate/generate_gptj_lora.yaml: -------------------------------------------------------------------------------- 1 | # model/tokenizer 2 | model_name: "EleutherAI/gpt-j-6b" 3 | tokenizer_name: "EleutherAI/gpt-j-6b" 4 | lora: true 5 | lora_path: "nomic-ai/gpt4all-gptj-lora-epoch_0" 6 | 7 | max_new_tokens: 512 8 | temperature: 0 9 | prompt: | 10 | #this code prints a string reversed 11 | my_string = "hello how are you" 12 | print(len(my_string)) 13 | 14 | 15 | My code above does not work. Can you help me? -------------------------------------------------------------------------------- /gpt4all-training/configs/generate/generate_llama.yaml: -------------------------------------------------------------------------------- 1 | # model/tokenizer 2 | model_name: # REPLACE WITH LLAMA MODEL NAME 3 | tokenizer_name: # REPLACE WITH LLAMA MODEL NAME 4 | 5 | 6 | max_new_tokens: 512 7 | temperature: 0.001 8 | prompt: | 9 | #this code prints a string reversed 10 | my_string = "hello how are you" 11 | print(len(my_string)) 12 | 13 | 14 | My code above does not work. Can you help me? 15 | -------------------------------------------------------------------------------- /gpt4all-training/configs/inference/gptj.yaml: -------------------------------------------------------------------------------- 1 | # model/tokenizer 2 | model_name: "nomic-ai/gpt4all-warmup-lr-epoch_1" 3 | tokenizer_name: "EleutherAI/gpt-j-6B" 4 | 5 | # dataset 6 | streaming: false 7 | num_proc: 64 8 | dataset_path: "nomic-ai/turbo-500k-multi" 9 | max_length: 1024 10 | batch_size: 32 11 | 12 | # logging 13 | seed: 42 14 | 15 | -------------------------------------------------------------------------------- /gpt4all-training/configs/train/finetune.yaml: -------------------------------------------------------------------------------- 1 | # model/tokenizer 2 | model_name: # add model here 3 | tokenizer_name: # add model here 4 | gradient_checkpointing: true 5 | save_name: # CHANGE 6 | 7 | # dataset 8 | streaming: false 9 | num_proc: 64 10 | dataset_path: # update 11 | max_length: 1024 12 | batch_size: 32 13 | 14 | # train dynamics 15 | lr: 5.0e-5 16 | eval_every: 800 17 | eval_steps: 100 18 | save_every: 800 19 | output_dir: # CHANGE 20 | checkpoint: null 21 | lora: false 22 | warmup_steps: 100 23 | num_epochs: 2 24 | 25 | # logging 26 | wandb: true 27 | wandb_entity: # update 28 | wandb_project_name: # update 29 | seed: 42 30 | 31 | -------------------------------------------------------------------------------- /gpt4all-training/configs/train/finetune_falcon.yaml: -------------------------------------------------------------------------------- 1 | # model/tokenizer 2 | model_name: "tiiuae/falcon-7b" 3 | tokenizer_name: "tiiuae/falcon-7b" 4 | gradient_checkpointing: true 5 | save_name: "nomic-ai/gpt4all-falcon" 6 | 7 | # dataset 8 | streaming: false 9 | num_proc: 64 10 | dataset_path: "nomic-ai/gpt4all-j-prompt-generations" 11 | revision: "v1.3-groovy" 12 | max_length: 1024 13 | batch_size: 32 14 | 15 | # train dynamics 16 | lr: 2.0e-5 17 | min_lr: 0 18 | weight_decay: 0.0 19 | eval_every: 500 20 | eval_steps: 105 21 | save_every: 1000 22 | log_grads_every: 500 23 | output_dir: "ckpts/falcon" 24 | checkpoint: "/home/paperspace/gpt4all/ckpts/mpt/step_1000" 25 | lora: false 26 | warmup_steps: 500 27 | num_epochs: 2 28 | 29 | # logging 30 | wandb: true 31 | wandb_entity: "gpt4all" 32 | wandb_project_name: "gpt4all" 33 
| seed: 42 34 | 35 | -------------------------------------------------------------------------------- /gpt4all-training/configs/train/finetune_gptj.yaml: -------------------------------------------------------------------------------- 1 | # model/tokenizer 2 | model_name: "EleutherAI/gpt-j-6B" 3 | tokenizer_name: "EleutherAI/gpt-j-6B" 4 | gradient_checkpointing: true 5 | save_name: # CHANGE 6 | 7 | # dataset 8 | streaming: false 9 | num_proc: 64 10 | dataset_path: # CHANGE 11 | max_length: 1024 12 | batch_size: 32 13 | 14 | # train dynamics 15 | lr: 2.0e-5 16 | min_lr: 0 17 | weight_decay: 0.0 18 | eval_every: 500 19 | eval_steps: 105 20 | save_every: 500 21 | log_grads_every: 100 22 | output_dir: # CHANGE 23 | checkpoint: null 24 | lora: false 25 | warmup_steps: 500 26 | num_epochs: 2 27 | 28 | # logging 29 | wandb: true 30 | wandb_entity: # CHANGE 31 | wandb_project_name: # CHANGE 32 | seed: 42 33 | 34 | -------------------------------------------------------------------------------- /gpt4all-training/configs/train/finetune_gptj_lora.yaml: -------------------------------------------------------------------------------- 1 | # model/tokenizer 2 | model_name: "EleutherAI/gpt-j-6b" 3 | tokenizer_name: "EleutherAI/gpt-j-6b" 4 | gradient_checkpointing: false 5 | save_name: # CHANGE 6 | 7 | # dataset 8 | streaming: false 9 | num_proc: 64 10 | dataset_path: # CHANGE 11 | max_length: 1024 12 | batch_size: 1 13 | 14 | # train dynamics 15 | lr: 2.0e-5 16 | min_lr: 0 17 | weight_decay: 0.0 18 | eval_every: 500 19 | eval_steps: 105 20 | save_every: 500 21 | log_grads_every: 500 22 | output_dir: # CHANGE 23 | checkpoint: null 24 | lora: true 25 | warmup_steps: 500 26 | num_epochs: 2 27 | 28 | # logging 29 | wandb: true 30 | wandb_entity: # CHANGE 31 | wandb_project_name: # CHANGE 32 | seed: 42 33 | 34 | -------------------------------------------------------------------------------- /gpt4all-training/configs/train/finetune_lora.yaml: -------------------------------------------------------------------------------- 1 | # model/tokenizer 2 | model_name: # update 3 | tokenizer_name: # update 4 | gradient_checkpointing: false 5 | save_name: # CHANGE 6 | 7 | # dataset 8 | streaming: false 9 | num_proc: 64 10 | dataset_path: # CHANGE 11 | max_length: 1024 12 | batch_size: 4 13 | 14 | # train dynamics 15 | lr: 5.0e-5 16 | min_lr: 0 17 | weight_decay: 0.0 18 | eval_every: 2000 19 | eval_steps: 100 20 | save_every: 2000 21 | output_dir: # CHANGE 22 | checkpoint: null 23 | lora: true 24 | warmup_steps: 100 25 | num_epochs: 2 26 | 27 | # logging 28 | wandb: true 29 | wandb_entity: # update 30 | wandb_project_name: # update 31 | seed: 42 32 | -------------------------------------------------------------------------------- /gpt4all-training/configs/train/finetune_mpt.yaml: -------------------------------------------------------------------------------- 1 | # model/tokenizer 2 | model_name: "mosaicml/mpt-7b" 3 | tokenizer_name: "mosaicml/mpt-7b" 4 | gradient_checkpointing: false 5 | save_name: "nomic-ai/mpt-finetuned-round2" 6 | 7 | # dataset 8 | streaming: false 9 | num_proc: 64 10 | dataset_path: "nomic-ai/gpt4all-j-prompt-generations" 11 | revision: "v1.3-groovy" 12 | max_length: 1024 13 | batch_size: 8 14 | 15 | # train dynamics 16 | lr: 2.0e-5 17 | min_lr: 0 18 | weight_decay: 0.0 19 | eval_every: 500 20 | eval_steps: 105 21 | save_every: 1000 22 | log_grads_every: 500 23 | output_dir: "ckpts/mpt" 24 | checkpoint: null 25 | lora: false 26 | warmup_steps: 500 27 | num_epochs: 2 28 | 29 | # logging 30 | wandb: 
false 31 | wandb_entity: "gpt4all" 32 | wandb_project_name: "gpt4all" 33 | seed: 42 34 | 35 |
-------------------------------------------------------------------------------- /gpt4all-training/configs/train/finetune_openllama.yaml: -------------------------------------------------------------------------------- 1 | # model/tokenizer 2 | model_name: "openlm-research/open_llama_7b" 3 | tokenizer_name: "openlm-research/open_llama_7b" 4 | gradient_checkpointing: true 5 | save_name: "nomic-ai/gpt4all-openllama" 6 | 7 | # dataset 8 | streaming: false 9 | num_proc: 64 10 | dataset_path: "nomic-ai/gpt4all-updated" 11 | revision: null 12 | max_length: 1024 13 | batch_size: 32 14 | 15 | # train dynamics 16 | lr: 2.0e-5 17 | min_lr: 0 18 | weight_decay: 0.0 19 | eval_every: 500 20 | log_every: 10 21 | save_every: 1000 22 | log_grads_every: 500 23 | output_dir: "ckpts/falcon" 24 | checkpoint: null 25 | lora: false 26 | warmup_steps: 500 27 | num_epochs: 3 28 | 29 | # logging 30 | wandb: true 31 | wandb_entity: "gpt4all" 32 | wandb_project_name: "gpt4all" 33 | seed: 42 34 | 35 |
-------------------------------------------------------------------------------- /gpt4all-training/create_hostname.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export WORKER_IP=$1 4 | N_GPUS=8 5 | # create dir if it doesn't exist 6 | sudo mkdir -p /job 7 | printf "localhost slots=$N_GPUS\n$WORKER_IP slots=$N_GPUS" | sudo tee /job/hostfile 8 | echo /job/hostfile
-------------------------------------------------------------------------------- /gpt4all-training/env.yaml: -------------------------------------------------------------------------------- 1 | name: vicuna 2 | channels: 3 | - conda-forge 4 | - pytorch 5 | - nvidia 6 | - huggingface 7 | dependencies: 8 | - python=3.8 9 | - accelerate 10 | - datasets 11 | - torchmetrics 12 | - evaluate 13 | - transformers 14 | - wandb 15 | - jsonlines 16 | - pip: 17 | - peft 18 | - nodelist-inflator 19 | - deepspeed 20 | - sentencepiece
-------------------------------------------------------------------------------- /gpt4all-training/eval_figures.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import glob 3 | import pickle 4 | import numpy as np 5 | from matplotlib import pyplot as plt 6 | 7 | plt.figure() 8 | for fpath in glob.glob('./eval_data/*.pkl'): 9 | with open(fpath, 'rb') as f: 10 | data = pickle.load(f) 11 | perplexities = data['perplexities'] 12 | # cap perplexities at 100; `nan=` must be passed by keyword, because the second positional argument of np.nan_to_num is `copy`, not the fill value 13 | perplexities = np.nan_to_num(perplexities, nan=100) 14 | perplexities = np.clip(perplexities, 0, 100) 15 | if 'alpaca' not in fpath: 16 | # e.g. ./eval_data/eval__model-gpt4all-lora.pkl -> "gpt4all-lora" 17 | identifier = "-".join(fpath.replace(".pkl", "").split("eval__model-")[1:]) 18 | label = 'GPT4all-' + identifier 19 | else: 20 | label = 'alpaca-lora' 21 | plt.hist(perplexities, label=label, alpha=.5, bins=50) 22 | 23 | plt.xlabel('Perplexity') 24 | plt.ylabel('Frequency') 25 | plt.legend() 26 | plt.savefig('figs/perplexity_hist.png') 27 | 28 |
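Usage sketch (not part of the repo): eval_figures.py globs ./eval_data/*.pkl, expects each pickle to hold a dict with a 'perplexities' key, and derives the plot label from an eval__model-<name>.pkl filename. The eval harness that writes those pickles is not shown in this excerpt, so the example filename below is illustrative only.

    # hypothetical input: ./eval_data/eval__model-gpt4all-lora.pkl, a pickled dict with a 'perplexities' list
    mkdir -p eval_data figs      # the script reads from ./eval_data and writes into ./figs
    python eval_figures.py       # produces figs/perplexity_hist.png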
-------------------------------------------------------------------------------- /gpt4all-training/figs/clustering_overfit.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-training/figs/clustering_overfit.png
-------------------------------------------------------------------------------- /gpt4all-training/figs/duplicate_loss.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-training/figs/duplicate_loss.png
-------------------------------------------------------------------------------- /gpt4all-training/figs/first_lora.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-training/figs/first_lora.png
-------------------------------------------------------------------------------- /gpt4all-training/figs/overfit-gpt-j.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-training/figs/overfit-gpt-j.png
-------------------------------------------------------------------------------- /gpt4all-training/figs/perplexity_hist.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-training/figs/perplexity_hist.png
-------------------------------------------------------------------------------- /gpt4all-training/figs/single_epoch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nomic-ai/gpt4all/b666d16db5aeab8b91aaf7963adcee9c643734d7/gpt4all-training/figs/single_epoch.png
-------------------------------------------------------------------------------- /gpt4all-training/generate.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | from transformers import AutoModelForCausalLM, AutoTokenizer 3 | from peft import PeftModelForCausalLM 4 | from read import read_config 5 | from argparse import ArgumentParser 6 | import torch 7 | import time 8 | 9 | 10 | def generate(tokenizer, prompt, model, config): 11 | input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(model.device) 12 | 13 | outputs = model.generate(input_ids=input_ids, max_new_tokens=config["max_new_tokens"], temperature=config["temperature"]) 14 | 15 | decoded = tokenizer.decode(outputs[0], skip_special_tokens=True).strip() 16 | 17 | return decoded[len(prompt):]  # strip the echoed prompt so only the completion is returned 18 | 19 | 20 | def setup_model(config): 21 | model = AutoModelForCausalLM.from_pretrained(config["model_name"], device_map="auto", torch_dtype=torch.float16) 22 | tokenizer = AutoTokenizer.from_pretrained(config["tokenizer_name"]) 23 | added_tokens = tokenizer.add_special_tokens({"bos_token": "<s>", "eos_token": "</s>", "pad_token": "<pad>"}) 24 | 25 | if added_tokens > 0: 26 | model.resize_token_embeddings(len(tokenizer)) 27 | 28 | if config["lora"]: 29 | model = PeftModelForCausalLM.from_pretrained(model, config["lora_path"], device_map="auto", torch_dtype=torch.float16) 30 | model.to(dtype=torch.float16) 31 | 32 | print(f"Mem needed: {model.get_memory_footprint() / 1024 / 1024 / 1024:.2f} GB") 33 | 34 | return model, tokenizer 35 | 36 | 37 | 38 | if __name__ == "__main__": 39 | parser = ArgumentParser() 40 | parser.add_argument("--config", type=str, required=True) 41 | parser.add_argument("--prompt", type=str) 42 | 43 | args = parser.parse_args() 44 | 45 | config = read_config(args.config) 46 | 47 | if config["prompt"] is None and args.prompt is None: 48 | raise ValueError("Prompt is required either in config or as argument") 49 | 50 | prompt = config["prompt"] if args.prompt is None else args.prompt 51 | 52 | print("Setting up model") 53 | model, tokenizer = setup_model(config) 54 | 55 | print("Generating") 56 | start = time.time() 57 | generation = generate(tokenizer, prompt, model, config) 58 | print(f"Done in {time.time() - start:.2f}s") 59 | print(generation) 60 |
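For reference, a typical invocation of generate.py, assuming the working directory is gpt4all-training and the packages from requirements.txt (at the end of this listing) are installed. Per the argparse setup above, --config is required and --prompt overrides the prompt: field in the YAML; with configs/generate/generate.yaml a --prompt argument is needed because that config sets prompt: null.

    pip install -r requirements.txt
    python generate.py --config configs/generate/generate.yaml --prompt "Explain what a LoRA adapter does."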
-------------------------------------------------------------------------------- /gpt4all-training/read.py: -------------------------------------------------------------------------------- 1 | import yaml 2 | 3 | 4 | def read_config(path): 5 | # read yaml and return contents; re-raise parse errors so callers fail loudly instead of silently receiving None 6 | with open(path, 'r') as file: 7 | try: 8 | return yaml.safe_load(file) 9 | except yaml.YAMLError as exc: 10 | print(exc) 11 | raise
-------------------------------------------------------------------------------- /gpt4all-training/requirements.txt: -------------------------------------------------------------------------------- 1 | accelerate 2 | datasets 3 | einops 4 | torchmetrics 5 | evaluate 6 | transformers>=4.28.0 7 | wandb 8 | peft 9 | nodelist-inflator 10 | deepspeed 11 | sentencepiece 12 | jsonlines 13 | nomic 14 | scikit-learn 15 | matplotlib
--------------------------------------------------------------------------------
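A hedged sketch of how the multi-node pieces above fit together: create_hostname.sh writes a DeepSpeed hostfile declaring 8 GPU slots on localhost and on one worker node, and DeepSpeed's launcher reads /job/hostfile by default, which is why the script writes to that path. The training entry point and its flags are not included in this excerpt, so the train.py invocation and --config flag below are assumptions for illustration only; the YAML files under configs/train/ and the JSON files under configs/deepspeed/ would parameterize such a run.

    bash create_hostname.sh 10.0.1.2                     # hypothetical worker IP; writes /job/hostfile
    deepspeed --hostfile /job/hostfile train.py \
        --config configs/train/finetune_gptj.yaml        # train.py and --config are assumed, not shown in this excerpt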