├── .copier-answers.yml ├── .eslintignore ├── .eslintrc.js ├── .github └── workflows │ ├── build.yml │ ├── check-release.yml │ ├── deploy.yml │ ├── enforce-label.yml │ ├── prep-release.yml │ └── publish-release.yml ├── .gitignore ├── .prettierignore ├── .prettierrc ├── .stylelintrc ├── .yarnrc.yml ├── CHANGELOG.md ├── LICENSE ├── README.md ├── RELEASE.md ├── img ├── 1-api-key.png ├── 2-jupyterlab-settings.png └── 3-usage.png ├── install.json ├── jupyterlite_ai └── __init__.py ├── package.json ├── pyproject.toml ├── schema ├── chat.json └── provider-registry.json ├── scripts └── settings-checker.js ├── setup.py ├── src ├── base-completer.ts ├── chat-handler.ts ├── completion-provider.ts ├── components │ └── stop-button.tsx ├── default-providers │ ├── Anthropic │ │ ├── completer.ts │ │ └── settings-schema.json │ ├── ChromeAI │ │ ├── completer.ts │ │ ├── instructions.ts │ │ └── settings-schema.json │ ├── MistralAI │ │ ├── completer.ts │ │ ├── instructions.ts │ │ └── settings-schema.json │ ├── Ollama │ │ ├── completer.ts │ │ ├── instructions.ts │ │ └── settings-schema.json │ ├── OpenAI │ │ ├── completer.ts │ │ └── settings-schema.json │ ├── WebLLM │ │ ├── completer.ts │ │ ├── instructions.ts │ │ └── settings-schema.json │ └── index.ts ├── global.d.ts ├── icons.ts ├── index.ts ├── provider.ts ├── settings │ ├── base.json │ ├── index.ts │ ├── panel.tsx │ └── utils.ts ├── tokens.ts └── types │ ├── ai-model.ts │ └── service-worker.d.ts ├── style ├── base.css ├── icons │ └── jupyternaut-lite.svg ├── index.css └── index.js ├── tsconfig.json └── yarn.lock /.copier-answers.yml: -------------------------------------------------------------------------------- 1 | # Changes here will be overwritten by Copier; NEVER EDIT MANUALLY 2 | _commit: v4.3.1 3 | _src_path: https://github.com/jupyterlab/extension-template 4 | author_email: '' 5 | author_name: JupyterLite Contributors 6 | has_binder: false 7 | has_settings: true 8 | kind: frontend 9 | labextension_name: @jupyterlite/ai 10 | 
project_short_description: AI code completions and chat for JupyterLite 11 | python_name: jupyterlite_ai 12 | repository: https://github.com/jupyterlite/ai 13 | test: false 14 | 15 | -------------------------------------------------------------------------------- /.eslintignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | dist 3 | coverage 4 | **/*.d.ts 5 | tests 6 | **/__tests__ 7 | ui-tests 8 | -------------------------------------------------------------------------------- /.eslintrc.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | extends: [ 3 | 'eslint:recommended', 4 | 'plugin:@typescript-eslint/eslint-recommended', 5 | 'plugin:@typescript-eslint/recommended', 6 | 'plugin:prettier/recommended' 7 | ], 8 | parser: '@typescript-eslint/parser', 9 | parserOptions: { 10 | project: 'tsconfig.json', 11 | sourceType: 'module' 12 | }, 13 | plugins: ['@stylistic', '@typescript-eslint'], 14 | rules: { 15 | '@typescript-eslint/naming-convention': [ 16 | 'error', 17 | { 18 | selector: 'interface', 19 | format: ['PascalCase'], 20 | custom: { 21 | regex: '^I[A-Z]', 22 | match: true 23 | } 24 | } 25 | ], 26 | '@typescript-eslint/no-unused-vars': [ 27 | 'warn', 28 | { 29 | args: 'none' 30 | } 31 | ], 32 | '@typescript-eslint/no-explicit-any': 'off', 33 | '@typescript-eslint/no-namespace': 'off', 34 | '@typescript-eslint/no-use-before-define': 'off', 35 | '@stylistic/quotes': [ 36 | 'error', 37 | 'single', 38 | { 39 | avoidEscape: true, 40 | allowTemplateLiterals: false 41 | } 42 | ], 43 | curly: ['error', 'all'], 44 | eqeqeq: 'error', 45 | 'no-restricted-imports': [ 46 | 'error', 47 | { 48 | paths: [ 49 | { 50 | name: '@mui/icons-material', 51 | message: 52 | "Please import icons using path imports, e.g. 
`import AddIcon from '@mui/icons-material/Add'`" 53 | } 54 | ], 55 | patterns: [ 56 | { 57 | group: ['@mui/*/*/*'], 58 | message: '3rd level imports in mui are considered private' 59 | } 60 | ] 61 | } 62 | ], 63 | 'prefer-arrow-callback': 'error' 64 | } 65 | }; 66 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: Build 2 | 3 | on: 4 | push: 5 | branches: main 6 | pull_request: 7 | branches: '*' 8 | 9 | concurrency: 10 | group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} 11 | cancel-in-progress: true 12 | 13 | jobs: 14 | build: 15 | runs-on: ubuntu-latest 16 | 17 | steps: 18 | - name: Checkout 19 | uses: actions/checkout@v4 20 | 21 | - name: Base Setup 22 | uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1 23 | 24 | - name: Install dependencies 25 | run: python -m pip install -U "jupyterlab>=4.0.0,<5" 26 | 27 | - name: Lint the extension 28 | run: | 29 | set -eux 30 | jlpm 31 | jlpm run lint:check 32 | 33 | - name: Build the extension 34 | run: | 35 | set -eux 36 | python -m pip install .[test] 37 | 38 | jupyter labextension list 39 | jupyter labextension list 2>&1 | grep -ie "@jupyterlite/ai.*OK" 40 | python -m jupyterlab.browser_check 41 | 42 | - name: Package the extension 43 | run: | 44 | set -eux 45 | 46 | pip install build 47 | python -m build 48 | pip uninstall -y "jupyterlite_ai" jupyterlab 49 | 50 | - name: Upload extension packages 51 | uses: actions/upload-artifact@v4 52 | with: 53 | name: extension-artifacts 54 | path: dist/jupyterlite_ai* 55 | if-no-files-found: error 56 | 57 | test_isolated: 58 | needs: build 59 | runs-on: ubuntu-latest 60 | 61 | steps: 62 | - name: Install Python 63 | uses: actions/setup-python@v5 64 | with: 65 | python-version: '3.9' 66 | architecture: 'x64' 67 | - uses: actions/download-artifact@v4 68 | with: 69 | name: 
extension-artifacts 70 | - name: Install and Test 71 | run: | 72 | set -eux 73 | # Remove NodeJS, twice to take care of system and locally installed node versions. 74 | sudo rm -rf $(which node) 75 | sudo rm -rf $(which node) 76 | 77 | pip install "jupyterlab>=4.0.0,<5" jupyterlite_ai*.whl 78 | 79 | 80 | jupyter labextension list 81 | jupyter labextension list 2>&1 | grep -ie "@jupyterlite/ai.*OK" 82 | python -m jupyterlab.browser_check --no-browser-test 83 | 84 | check_links: 85 | name: Check Links 86 | runs-on: ubuntu-latest 87 | timeout-minutes: 15 88 | steps: 89 | - uses: actions/checkout@v4 90 | - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1 91 | - uses: jupyterlab/maintainer-tools/.github/actions/check-links@v1 92 | 93 | check_settings: 94 | name: Check default provider settings 95 | runs-on: ubuntu-latest 96 | timeout-minutes: 5 97 | steps: 98 | - name: Checkout 99 | uses: actions/checkout@v4 100 | - name: Base Setup 101 | uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1 102 | - name: Install jupyterlab 103 | run: python -m pip install -U "jupyterlab>=4.0.0,<5" 104 | - name: Install node packages 105 | run: jlpm install 106 | - name: Check the settings 107 | run: jlpm settings:check 108 | -------------------------------------------------------------------------------- /.github/workflows/check-release.yml: -------------------------------------------------------------------------------- 1 | name: Check Release 2 | on: 3 | push: 4 | branches: ["main"] 5 | pull_request: 6 | branches: ["*"] 7 | 8 | concurrency: 9 | group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} 10 | cancel-in-progress: true 11 | 12 | jobs: 13 | check_release: 14 | runs-on: ubuntu-latest 15 | steps: 16 | - name: Checkout 17 | uses: actions/checkout@v4 18 | - name: Base Setup 19 | uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1 20 | - name: Check Release 21 | uses: 
jupyter-server/jupyter_releaser/.github/actions/check-release@v2 22 | with: 23 | 24 | token: ${{ secrets.GITHUB_TOKEN }} 25 | 26 | - name: Upload Distributions 27 | uses: actions/upload-artifact@v4 28 | with: 29 | name: jupyterlite_ai-releaser-dist-${{ github.run_number }} 30 | path: .jupyter_releaser_checkout/dist 31 | -------------------------------------------------------------------------------- /.github/workflows/deploy.yml: -------------------------------------------------------------------------------- 1 | name: Build and Deploy 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | branches: 9 | - '*' 10 | 11 | jobs: 12 | build: 13 | runs-on: ubuntu-latest 14 | steps: 15 | - name: Checkout 16 | uses: actions/checkout@v4 17 | - name: Setup Python 18 | uses: actions/setup-python@v5 19 | with: 20 | python-version: '3.11' 21 | - name: Install the dependencies 22 | run: | 23 | # install the JupyterLite dependencies 24 | python -m pip install jupyterlite-pyodide-kernel jupyterlite-core --pre 25 | 26 | # install a couple of other useful packages for the demo 27 | python -m pip install ipywidgets 28 | 29 | # install a dev version of the extension 30 | python -m pip install . 
31 | - name: Build the JupyterLite site 32 | run: | 33 | jupyter lite build --output-dir dist 34 | - name: Upload artifact 35 | uses: actions/upload-pages-artifact@v3 36 | with: 37 | path: ./dist 38 | 39 | deploy: 40 | needs: build 41 | if: github.ref == 'refs/heads/main' 42 | permissions: 43 | pages: write 44 | id-token: write 45 | 46 | environment: 47 | name: github-pages 48 | url: ${{ steps.deployment.outputs.page_url }} 49 | 50 | runs-on: ubuntu-latest 51 | steps: 52 | - name: Deploy to GitHub Pages 53 | id: deployment 54 | uses: actions/deploy-pages@v4 55 | -------------------------------------------------------------------------------- /.github/workflows/enforce-label.yml: -------------------------------------------------------------------------------- 1 | name: Enforce PR label 2 | 3 | on: 4 | pull_request: 5 | types: [labeled, unlabeled, opened, edited, synchronize] 6 | jobs: 7 | enforce-label: 8 | runs-on: ubuntu-latest 9 | permissions: 10 | pull-requests: write 11 | steps: 12 | - name: enforce-triage-label 13 | uses: jupyterlab/maintainer-tools/.github/actions/enforce-label@v1 14 | -------------------------------------------------------------------------------- /.github/workflows/prep-release.yml: -------------------------------------------------------------------------------- 1 | name: "Step 1: Prep Release" 2 | on: 3 | workflow_dispatch: 4 | inputs: 5 | version_spec: 6 | description: "New Version Specifier" 7 | default: "next" 8 | required: false 9 | branch: 10 | description: "The branch to target" 11 | required: false 12 | post_version_spec: 13 | description: "Post Version Specifier" 14 | required: false 15 | # silent: 16 | # description: "Set a placeholder in the changelog and don't publish the release." 
17 | # required: false 18 | # type: boolean 19 | since: 20 | description: "Use PRs with activity since this date or git reference" 21 | required: false 22 | since_last_stable: 23 | description: "Use PRs with activity since the last stable git tag" 24 | required: false 25 | type: boolean 26 | jobs: 27 | prep_release: 28 | runs-on: ubuntu-latest 29 | permissions: 30 | contents: write 31 | steps: 32 | - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1 33 | 34 | - name: Prep Release 35 | id: prep-release 36 | uses: jupyter-server/jupyter_releaser/.github/actions/prep-release@v2 37 | with: 38 | token: ${{ secrets.GITHUB_TOKEN }} 39 | version_spec: ${{ github.event.inputs.version_spec }} 40 | # silent: ${{ github.event.inputs.silent }} 41 | post_version_spec: ${{ github.event.inputs.post_version_spec }} 42 | branch: ${{ github.event.inputs.branch }} 43 | since: ${{ github.event.inputs.since }} 44 | since_last_stable: ${{ github.event.inputs.since_last_stable }} 45 | 46 | - name: "** Next Step **" 47 | run: | 48 | echo "Optional): Review Draft Release: ${{ steps.prep-release.outputs.release_url }}" 49 | -------------------------------------------------------------------------------- /.github/workflows/publish-release.yml: -------------------------------------------------------------------------------- 1 | name: "Step 2: Publish Release" 2 | on: 3 | workflow_dispatch: 4 | inputs: 5 | branch: 6 | description: "The target branch" 7 | required: false 8 | release_url: 9 | description: "The URL of the draft GitHub release" 10 | required: false 11 | steps_to_skip: 12 | description: "Comma separated list of steps to skip" 13 | required: false 14 | 15 | jobs: 16 | publish_release: 17 | runs-on: ubuntu-latest 18 | environment: release 19 | permissions: 20 | id-token: write 21 | steps: 22 | - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1 23 | 24 | - uses: actions/create-github-app-token@v1 25 | id: app-token 26 | with: 27 | app-id: ${{ vars.APP_ID }} 
28 | private-key: ${{ secrets.APP_PRIVATE_KEY }} 29 | 30 | - name: Populate Release 31 | id: populate-release 32 | uses: jupyter-server/jupyter_releaser/.github/actions/populate-release@v2 33 | with: 34 | token: ${{ steps.app-token.outputs.token }} 35 | branch: ${{ github.event.inputs.branch }} 36 | release_url: ${{ github.event.inputs.release_url }} 37 | steps_to_skip: ${{ github.event.inputs.steps_to_skip }} 38 | 39 | - name: Finalize Release 40 | id: finalize-release 41 | env: 42 | NPM_TOKEN: ${{ secrets.NPM_TOKEN }} 43 | uses: jupyter-server/jupyter_releaser/.github/actions/finalize-release@v2 44 | with: 45 | token: ${{ steps.app-token.outputs.token }} 46 | release_url: ${{ steps.populate-release.outputs.release_url }} 47 | 48 | - name: "** Next Step **" 49 | if: ${{ success() }} 50 | run: | 51 | echo "Verify the final release" 52 | echo ${{ steps.finalize-release.outputs.release_url }} 53 | 54 | - name: "** Failure Message **" 55 | if: ${{ failure() }} 56 | run: | 57 | echo "Failed to Publish the Draft Release Url:" 58 | echo ${{ steps.populate-release.outputs.release_url }} 59 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.bundle.* 2 | lib/ 3 | node_modules/ 4 | *.log 5 | .eslintcache 6 | .stylelintcache 7 | *.egg-info/ 8 | .ipynb_checkpoints 9 | *.tsbuildinfo 10 | jupyterlite_ai/labextension 11 | # Version file is handled by hatchling 12 | jupyterlite_ai/_version.py 13 | 14 | # Schema and module built at build time 15 | src/settings/schemas/index.ts 16 | src/settings/schemas/_generated 17 | 18 | # Created by https://www.gitignore.io/api/python 19 | # Edit at https://www.gitignore.io/?templates=python 20 | 21 | ### Python ### 22 | # Byte-compiled / optimized / DLL files 23 | __pycache__/ 24 | *.py[cod] 25 | *$py.class 26 | 27 | # C extensions 28 | *.so 29 | 30 | # Distribution / packaging 31 | .Python 32 | build/ 33 | 
develop-eggs/ 34 | dist/ 35 | downloads/ 36 | eggs/ 37 | .eggs/ 38 | lib/ 39 | lib64/ 40 | parts/ 41 | sdist/ 42 | var/ 43 | wheels/ 44 | pip-wheel-metadata/ 45 | share/python-wheels/ 46 | .installed.cfg 47 | *.egg 48 | MANIFEST 49 | 50 | # PyInstaller 51 | # Usually these files are written by a python script from a template 52 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 53 | *.manifest 54 | *.spec 55 | 56 | # Installer logs 57 | pip-log.txt 58 | pip-delete-this-directory.txt 59 | 60 | # Unit test / coverage reports 61 | htmlcov/ 62 | .tox/ 63 | .nox/ 64 | .coverage 65 | .coverage.* 66 | .cache 67 | nosetests.xml 68 | coverage/ 69 | coverage.xml 70 | *.cover 71 | .hypothesis/ 72 | .pytest_cache/ 73 | 74 | # Translations 75 | *.mo 76 | *.pot 77 | 78 | # Scrapy stuff: 79 | .scrapy 80 | 81 | # Sphinx documentation 82 | docs/_build/ 83 | 84 | # PyBuilder 85 | target/ 86 | 87 | # pyenv 88 | .python-version 89 | 90 | # celery beat schedule file 91 | celerybeat-schedule 92 | 93 | # SageMath parsed files 94 | *.sage.py 95 | 96 | # Spyder project settings 97 | .spyderproject 98 | .spyproject 99 | 100 | # Rope project settings 101 | .ropeproject 102 | 103 | # Mr Developer 104 | .mr.developer.cfg 105 | .project 106 | .pydevproject 107 | 108 | # mkdocs documentation 109 | /site 110 | 111 | # mypy 112 | .mypy_cache/ 113 | .dmypy.json 114 | dmypy.json 115 | 116 | # Pyre type checker 117 | .pyre/ 118 | 119 | # End of https://www.gitignore.io/api/python 120 | 121 | # OSX files 122 | .DS_Store 123 | 124 | # Yarn cache 125 | .yarn/ 126 | 127 | # JupyterLite 128 | _output 129 | *.doit.db 130 | 131 | # Jupyter 132 | Untitled*.ipynb 133 | -------------------------------------------------------------------------------- /.prettierignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | **/node_modules 3 | **/lib 4 | **/package.json 5 | !/package.json 6 | jupyterlite_ai 7 | .venv 8 | 
src/default-providers/*/settings-schema.json 9 | -------------------------------------------------------------------------------- /.prettierrc: -------------------------------------------------------------------------------- 1 | { 2 | "singleQuote": true, 3 | "trailingComma": "none", 4 | "arrowParens": "avoid", 5 | "endOfLine": "auto", 6 | "overrides": [ 7 | { 8 | "files": "package.json", 9 | "options": { 10 | "tabWidth": 4 11 | } 12 | } 13 | ] 14 | } 15 | -------------------------------------------------------------------------------- /.stylelintrc: -------------------------------------------------------------------------------- 1 | { 2 | "extends": [ 3 | "stylelint-config-recommended", 4 | "stylelint-config-standard", 5 | "stylelint-prettier/recommended" 6 | ], 7 | "plugins": [ 8 | "stylelint-csstree-validator" 9 | ], 10 | "rules": { 11 | "csstree/validator": true, 12 | "property-no-vendor-prefix": null, 13 | "selector-class-pattern": "^([a-z][A-z\\d]*)(-[A-z\\d]+)*$", 14 | "selector-no-vendor-prefix": null, 15 | "value-no-vendor-prefix": null 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /.yarnrc.yml: -------------------------------------------------------------------------------- 1 | nodeLinker: node-modules 2 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | 4 | 5 | ## 0.7.0 6 | 7 | ([Full Changelog](https://github.com/jupyterlite/ai/compare/v0.6.2...317fedd438232fb3add50e28037adb637cbc0814)) 8 | 9 | ### Enhancements made 10 | 11 | - Add a welcome message [#89](https://github.com/jupyterlite/ai/pull/89) ([@brichet](https://github.com/brichet)) 12 | - Handle compatibility with chromeAI and WebLLM [#87](https://github.com/jupyterlite/ai/pull/87) ([@brichet](https://github.com/brichet)) 13 | - Do not expose providers api 
[#84](https://github.com/jupyterlite/ai/pull/84) ([@brichet](https://github.com/brichet)) 14 | - Remove the custom settings connector [#81](https://github.com/jupyterlite/ai/pull/81) ([@brichet](https://github.com/brichet)) 15 | - Upgrade secrets manager [#75](https://github.com/jupyterlite/ai/pull/75) ([@brichet](https://github.com/brichet)) 16 | - Better handling of default values in settings [#73](https://github.com/jupyterlite/ai/pull/73) ([@brichet](https://github.com/brichet)) 17 | - Add Ollama provider [#69](https://github.com/jupyterlite/ai/pull/69) ([@brichet](https://github.com/brichet)) 18 | - WebLLM [#47](https://github.com/jupyterlite/ai/pull/47) ([@jtpio](https://github.com/jtpio)) 19 | 20 | ### Bugs fixed 21 | 22 | - Export the IAIProviderRegistry token [#88](https://github.com/jupyterlite/ai/pull/88) ([@brichet](https://github.com/brichet)) 23 | - Update `@langchain/community` to fix ChromeAI [#76](https://github.com/jupyterlite/ai/pull/76) ([@jtpio](https://github.com/jtpio)) 24 | 25 | ### Maintenance and upkeep improvements 26 | 27 | - Pin PyPI version of jupyter-secrets-manager [#86](https://github.com/jupyterlite/ai/pull/86) ([@brichet](https://github.com/brichet)) 28 | - Install `ipywidgets` for the demo deployed on GitHub Pages [#79](https://github.com/jupyterlite/ai/pull/79) ([@jtpio](https://github.com/jtpio)) 29 | 30 | ### Documentation improvements 31 | 32 | - Mention JupyterLab 4.4 and Notebook 7.4 final in the README [#83](https://github.com/jupyterlite/ai/pull/83) ([@jtpio](https://github.com/jtpio)) 33 | - Update Ollama instructions [#82](https://github.com/jupyterlite/ai/pull/82) ([@brichet](https://github.com/brichet)) 34 | 35 | ### Contributors to this release 36 | 37 | ([GitHub contributors page for this release](https://github.com/jupyterlite/ai/graphs/contributors?from=2025-05-13&to=2025-06-05&type=c)) 38 | 39 | 
[@brichet](https://github.com/search?q=repo%3Ajupyterlite%2Fai+involves%3Abrichet+updated%3A2025-05-13..2025-06-05&type=Issues) | [@jtpio](https://github.com/search?q=repo%3Ajupyterlite%2Fai+involves%3Ajtpio+updated%3A2025-05-13..2025-06-05&type=Issues) | [@trungleduc](https://github.com/search?q=repo%3Ajupyterlite%2Fai+involves%3Atrungleduc+updated%3A2025-05-13..2025-06-05&type=Issues) 40 | 41 | 42 | 43 | ## 0.6.2 44 | 45 | ([Full Changelog](https://github.com/jupyterlite/ai/compare/v0.6.1...8cf12919ab5922b2ec7ed8f284299725a493d349)) 46 | 47 | ### Bugs fixed 48 | 49 | - Fix completer settings [#70](https://github.com/jupyterlite/ai/pull/70) ([@brichet](https://github.com/brichet)) 50 | - Fix the API keys in provider when using the secrets manager [#68](https://github.com/jupyterlite/ai/pull/68) ([@brichet](https://github.com/brichet)) 51 | 52 | ### Maintenance and upkeep improvements 53 | 54 | - Align the version of rjsf dependencies [#72](https://github.com/jupyterlite/ai/pull/72) ([@brichet](https://github.com/brichet)) 55 | 56 | ### Contributors to this release 57 | 58 | ([GitHub contributors page for this release](https://github.com/jupyterlite/ai/graphs/contributors?from=2025-05-02&to=2025-05-13&type=c)) 59 | 60 | [@brichet](https://github.com/search?q=repo%3Ajupyterlite%2Fai+involves%3Abrichet+updated%3A2025-05-02..2025-05-13&type=Issues) 61 | 62 | ## 0.6.1 63 | 64 | ([Full Changelog](https://github.com/jupyterlite/ai/compare/v0.6.0...52376d7823635a8561eda88d6fcd7acd615c50c8)) 65 | 66 | ### Enhancements made 67 | 68 | - Allow to avoid displaying the secret fields of the settings UI [#65](https://github.com/jupyterlite/ai/pull/65) ([@brichet](https://github.com/brichet)) 69 | - Update secrets manager to >=0.3.0 [#63](https://github.com/jupyterlite/ai/pull/63) ([@brichet](https://github.com/brichet)) 70 | 71 | ### Maintenance and upkeep improvements 72 | 73 | - Update secrets manager to >=0.3.0 [#63](https://github.com/jupyterlite/ai/pull/63) 
([@brichet](https://github.com/brichet)) 74 | - Update to jupyterlab>=4.4.0 [#62](https://github.com/jupyterlite/ai/pull/62) ([@brichet](https://github.com/brichet)) 75 | 76 | ### Contributors to this release 77 | 78 | ([GitHub contributors page for this release](https://github.com/jupyterlite/ai/graphs/contributors?from=2025-03-31&to=2025-05-02&type=c)) 79 | 80 | [@brichet](https://github.com/search?q=repo%3Ajupyterlite%2Fai+involves%3Abrichet+updated%3A2025-03-31..2025-05-02&type=Issues) 81 | 82 | ## 0.6.0 83 | 84 | ([Full Changelog](https://github.com/jupyterlite/ai/compare/v0.5.0...15b6de565429273e0b159fa1a66712575449605d)) 85 | 86 | ### Enhancements made 87 | 88 | - Stop streaming [#61](https://github.com/jupyterlite/ai/pull/61) ([@brichet](https://github.com/brichet)) 89 | - Do not store passwords to server settings [#60](https://github.com/jupyterlite/ai/pull/60) ([@brichet](https://github.com/brichet)) 90 | 91 | ### Contributors to this release 92 | 93 | ([GitHub contributors page for this release](https://github.com/jupyterlite/ai/graphs/contributors?from=2025-03-21&to=2025-03-31&type=c)) 94 | 95 | [@brichet](https://github.com/search?q=repo%3Ajupyterlite%2Fai+involves%3Abrichet+updated%3A2025-03-21..2025-03-31&type=Issues) | [@jtpio](https://github.com/search?q=repo%3Ajupyterlite%2Fai+involves%3Ajtpio+updated%3A2025-03-21..2025-03-31&type=Issues) 96 | 97 | ## 0.5.0 98 | 99 | ([Full Changelog](https://github.com/jupyterlite/ai/compare/v0.4.0...f37fb548ee1c49f5600495ccb6be35ab976a3bce)) 100 | 101 | ### Enhancements made 102 | 103 | - Default providers refactoring [#58](https://github.com/jupyterlite/ai/pull/58) ([@brichet](https://github.com/brichet)) 104 | - Use the secrets manager [#53](https://github.com/jupyterlite/ai/pull/53) ([@brichet](https://github.com/brichet)) 105 | 106 | ### Bugs fixed 107 | 108 | - Avoid building settings schemas when building javascript [#59](https://github.com/jupyterlite/ai/pull/59) ([@brichet](https://github.com/brichet)) 
109 | 110 | ### Maintenance and upkeep improvements 111 | 112 | - Default providers refactoring [#58](https://github.com/jupyterlite/ai/pull/58) ([@brichet](https://github.com/brichet)) 113 | - Update @jupyter/chat to v0.8.1 [#57](https://github.com/jupyterlite/ai/pull/57) ([@brichet](https://github.com/brichet)) 114 | 115 | ### Contributors to this release 116 | 117 | ([GitHub contributors page for this release](https://github.com/jupyterlite/ai/graphs/contributors?from=2025-03-10&to=2025-03-21&type=c)) 118 | 119 | [@brichet](https://github.com/search?q=repo%3Ajupyterlite%2Fai+involves%3Abrichet+updated%3A2025-03-10..2025-03-21&type=Issues) 120 | 121 | ## 0.4.0 122 | 123 | ([Full Changelog](https://github.com/jupyterlite/ai/compare/v0.3.0...bd9c07a7fec2bfb62c6863a0aacdaefbf22bcd82)) 124 | 125 | ### Enhancements made 126 | 127 | - Provider registry [#50](https://github.com/jupyterlite/ai/pull/50) ([@brichet](https://github.com/brichet)) 128 | - Completer plugin [#49](https://github.com/jupyterlite/ai/pull/49) ([@brichet](https://github.com/brichet)) 129 | - Settings UI improvement [#48](https://github.com/jupyterlite/ai/pull/48) ([@brichet](https://github.com/brichet)) 130 | 131 | ### Contributors to this release 132 | 133 | ([GitHub contributors page for this release](https://github.com/jupyterlite/ai/graphs/contributors?from=2025-02-19&to=2025-03-10&type=c)) 134 | 135 | [@brichet](https://github.com/search?q=repo%3Ajupyterlite%2Fai+involves%3Abrichet+updated%3A2025-02-19..2025-03-10&type=Issues) 136 | 137 | ## 0.3.0 138 | 139 | ([Full Changelog](https://github.com/jupyterlite/ai/compare/v0.2.0...1b482ade692e42ad8885aaf3211502720cadeecf)) 140 | 141 | ### Enhancements made 142 | 143 | - Add chat autocompletion and the `/clear` command [#41](https://github.com/jupyterlite/ai/pull/41) ([@jtpio](https://github.com/jtpio)) 144 | - Add icon and name for the AI assistant [#40](https://github.com/jupyterlite/ai/pull/40) ([@jtpio](https://github.com/jtpio)) 145 | - Stream 
responses [#39](https://github.com/jupyterlite/ai/pull/39) ([@jtpio](https://github.com/jtpio)) 146 | - Use a chat model instead of LLM for codestral completion [#31](https://github.com/jupyterlite/ai/pull/31) ([@brichet](https://github.com/brichet)) 147 | - Add initial system prompt in ChatHandler and completion [#28](https://github.com/jupyterlite/ai/pull/28) ([@brichet](https://github.com/brichet)) 148 | - Add `ChromeAI` [#27](https://github.com/jupyterlite/ai/pull/27) ([@jtpio](https://github.com/jtpio)) 149 | - Anthropic (Claude) provider [#22](https://github.com/jupyterlite/ai/pull/22) ([@brichet](https://github.com/brichet)) 150 | - Add OpenAI provider [#19](https://github.com/jupyterlite/ai/pull/19) ([@brichet](https://github.com/brichet)) 151 | - Dynamic settings for providers [#14](https://github.com/jupyterlite/ai/pull/14) ([@brichet](https://github.com/brichet)) 152 | 153 | ### Bugs fixed 154 | 155 | - Update to a newer `@langchain/community` to fix ChromeAI integration [#43](https://github.com/jupyterlite/ai/pull/43) ([@jtpio](https://github.com/jtpio)) 156 | - Upgrade the jupyterlite-core package in deployment [#30](https://github.com/jupyterlite/ai/pull/30) ([@brichet](https://github.com/brichet)) 157 | 158 | ### Maintenance and upkeep improvements 159 | 160 | - Deployment with prereleased jupyterlite-pyodide-kernel [#33](https://github.com/jupyterlite/ai/pull/33) ([@brichet](https://github.com/brichet)) 161 | - Fix installation of pre-released jupyterlite in deployment [#32](https://github.com/jupyterlite/ai/pull/32) ([@brichet](https://github.com/brichet)) 162 | - Upgrade the jupyterlite-core package in deployment [#30](https://github.com/jupyterlite/ai/pull/30) ([@brichet](https://github.com/brichet)) 163 | 164 | ### Documentation improvements 165 | 166 | - Update README.md [#26](https://github.com/jupyterlite/ai/pull/26) ([@jtpio](https://github.com/jtpio)) 167 | 168 | ### Contributors to this release 169 | 170 | ([GitHub contributors page for 
this release](https://github.com/jupyterlite/ai/graphs/contributors?from=2024-12-04&to=2025-02-19&type=c)) 171 | 172 | [@brichet](https://github.com/search?q=repo%3Ajupyterlite%2Fai+involves%3Abrichet+updated%3A2024-12-04..2025-02-19&type=Issues) | [@jtpio](https://github.com/search?q=repo%3Ajupyterlite%2Fai+involves%3Ajtpio+updated%3A2024-12-04..2025-02-19&type=Issues) 173 | 174 | ## 0.2.0 175 | 176 | ([Full Changelog](https://github.com/jupyterlite/ai/compare/v0.1.0...8c41100bf87c99e377fd4752c50853dace7667e1)) 177 | 178 | ### Enhancements made 179 | 180 | - Refactoring AIProvider and handling errors [#15](https://github.com/jupyterlite/ai/pull/15) ([@brichet](https://github.com/brichet)) 181 | - Making the LLM providers more generics [#10](https://github.com/jupyterlite/ai/pull/10) ([@brichet](https://github.com/brichet)) 182 | - Use a throttler instead of a debouncer for code completion [#8](https://github.com/jupyterlite/ai/pull/8) ([@brichet](https://github.com/brichet)) 183 | - Update @jupyter/chat to 0.5.0 [#7](https://github.com/jupyterlite/ai/pull/7) ([@brichet](https://github.com/brichet)) 184 | - Switch to using langchain.js [#6](https://github.com/jupyterlite/ai/pull/6) ([@jtpio](https://github.com/jtpio)) 185 | 186 | ### Bugs fixed 187 | 188 | - Improves the relevance of codestral completion [#18](https://github.com/jupyterlite/ai/pull/18) ([@brichet](https://github.com/brichet)) 189 | 190 | ### Maintenance and upkeep improvements 191 | 192 | - Update references to the repo after the rename [#21](https://github.com/jupyterlite/ai/pull/21) ([@jtpio](https://github.com/jtpio)) 193 | - Rename the extension `jupyterlite_ai` [#20](https://github.com/jupyterlite/ai/pull/20) ([@brichet](https://github.com/brichet)) 194 | 195 | ### Contributors to this release 196 | 197 | ([GitHub contributors page for this release](https://github.com/jupyterlite/ai/graphs/contributors?from=2024-06-24&to=2024-12-04&type=c)) 198 | 199 | 
[@brichet](https://github.com/search?q=repo%3Ajupyterlite%2Fai+involves%3Abrichet+updated%3A2024-06-24..2024-12-04&type=Issues) | [@jtpio](https://github.com/search?q=repo%3Ajupyterlite%2Fai+involves%3Ajtpio+updated%3A2024-06-24..2024-12-04&type=Issues) 200 | 201 | ## 0.1.0 202 | 203 | ([Full Changelog](https://github.com/jupyterlite/ai/compare/9c8d350b8876ad3a9ffe8dbe723ca093bb680681...b77e9e9a563cda3b9d37972248e738746f7370a8)) 204 | 205 | ### Maintenance and upkeep improvements 206 | 207 | - Reset version [#4](https://github.com/jupyterlite/ai/pull/4) ([@jtpio](https://github.com/jtpio)) 208 | 209 | ### Documentation improvements 210 | 211 | - Add disclaimer [#3](https://github.com/jupyterlite/ai/pull/3) ([@jtpio](https://github.com/jtpio)) 212 | - Update links to the repo [#2](https://github.com/jupyterlite/ai/pull/2) ([@jtpio](https://github.com/jtpio)) 213 | - Add files for a JupyterLite demo [#1](https://github.com/jupyterlite/ai/pull/1) ([@jtpio](https://github.com/jtpio)) 214 | 215 | ### Contributors to this release 216 | 217 | ([GitHub contributors page for this release](https://github.com/jupyterlite/ai/graphs/contributors?from=2024-06-10&to=2024-06-24&type=c)) 218 | 219 | [@jtpio](https://github.com/search?q=repo%3Ajupyterlite%2Fai+involves%3Ajtpio+updated%3A2024-06-10..2024-06-24&type=Issues) 220 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2024, JupyterLite Contributors 4 | Copyright (c) 2024, Jeremy Tuloup 5 | All rights reserved. 6 | 7 | Redistribution and use in source and binary forms, with or without 8 | modification, are permitted provided that the following conditions are met: 9 | 10 | 1. Redistributions of source code must retain the above copyright notice, this 11 | list of conditions and the following disclaimer. 12 | 13 | 2. 
Redistributions in binary form must reproduce the above copyright notice, 14 | this list of conditions and the following disclaimer in the documentation 15 | and/or other materials provided with the distribution. 16 | 17 | 3. Neither the name of the copyright holder nor the names of its 18 | contributors may be used to endorse or promote products derived from 19 | this software without specific prior written permission. 20 | 21 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 22 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 24 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 25 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 27 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 28 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 29 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
31 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # jupyterlite-ai 2 | 3 | [![Github Actions Status](https://github.com/jupyterlite/ai/workflows/Build/badge.svg)](https://github.com/jupyterlite/ai/actions/workflows/build.yml) 4 | [![lite-badge](https://jupyterlite.rtfd.io/en/latest/_static/badge.svg)](https://jupyterlite.github.io/ai/lab/index.html) 5 | 6 | AI code completions and chat for JupyterLab, Notebook 7 and JupyterLite ✨ 7 | 8 | [a screencast showing the Jupyterlite AI extension in JupyterLite](https://github.com/jupyterlite/ai/assets/591645/855c4e3e-3a63-4868-8052-5c9909922c21) 9 | 10 | ## Requirements 11 | 12 | > [!NOTE] 13 | > This extension is meant to be used in JupyterLite to enable AI code completions and chat in the browser, with a specific provider. 14 | > To enable more AI providers in JupyterLab and Jupyter Notebook, we recommend using the [Jupyter AI](https://github.com/jupyterlab/jupyter-ai) extension directly. 15 | > At the moment Jupyter AI is not compatible with JupyterLite, but might be to some extent in the future. 16 | 17 | - JupyterLab >= 4.4.0 or Notebook >= 7.4.0 18 | 19 | ## ✨ Try it in your browser ✨ 20 | 21 | You can try the extension in your browser using JupyterLite: 22 | 23 | [![lite-badge](https://jupyterlite.rtfd.io/en/latest/_static/badge.svg)](https://jupyterlite.github.io/ai/lab/index.html) 24 | 25 | See the [Usage](#usage) section below for more information on how to provide your API key. 26 | 27 | ## Install 28 | 29 | To install the extension, execute: 30 | 31 | ```bash 32 | pip install jupyterlite-ai 33 | ``` 34 | 35 | To install requirements (jupyterlab, jupyterlite and notebook), there is an optional dependencies argument: 36 | 37 | ```bash 38 | pip install jupyterlite-ai[jupyter] 39 | ``` 40 | 41 | # Usage 42 | 43 | AI providers typically require using an API key to access their models. 
44 | 45 | The process is different for each provider, so you may refer to their documentation to learn how to generate new API keys, if they are not covered in the sections below. 46 | 47 | ## Using MistralAI 48 | 49 | > [!WARNING] 50 | > This extension is still very much experimental. It is not an official MistralAI extension. 51 | 52 | 1. Go to https://console.mistral.ai/api-keys/ and create an API key. 53 | 54 | ![Screenshot showing how to create an API key](./img/1-api-key.png) 55 | 56 | 2. Open the JupyterLab settings and go to the **Ai providers** section to select the `MistralAI` 57 | provider and the API key (required). 58 | 59 | ![Screenshot showing how to add the API key to the settings](./img/2-jupyterlab-settings.png) 60 | 61 | 3. Open the chat, or use the inline completer 62 | 63 | ![Screenshot showing how to use the chat](./img/3-usage.png) 64 | 65 | ## Using ChromeAI 66 | 67 | > [!WARNING] 68 | > Support for ChromeAI is still experimental and only available in Google Chrome. 69 | 70 | You can test ChromeAI is enabled in your browser by going to the following URL: https://chromeai.org/ 71 | 72 | Enable the proper flags in Google Chrome. 73 | 74 | - chrome://flags/#prompt-api-for-gemini-nano 75 | - Select: `Enabled` 76 | - chrome://flags/#optimization-guide-on-device-model 77 | - Select: `Enabled BypassPrefRequirement` 78 | - chrome://components 79 | - Click `Check for Update` on Optimization Guide On Device Model to download the model 80 | - [Optional] chrome://flags/#text-safety-classifier 81 | 82 | ![a screenshot showing how to enable the ChromeAI flag in Google Chrome](https://github.com/user-attachments/assets/d48f46cc-52ee-4ce5-9eaf-c763cdbee04c) 83 | 84 | Then restart Chrome for these changes to take effect. 85 | 86 | > [!WARNING] 87 | > On first use, Chrome will download the on-device model, which can be as large as 22GB (according to their docs and at the time of writing). 
88 | > During the download, ChromeAI may not be available via the extension. 89 | 90 | > [!NOTE] 91 | > For more information about Chrome Built-in AI: https://developer.chrome.com/docs/ai/get-started 92 | 93 | ## Uninstall 94 | 95 | To remove the extension, execute: 96 | 97 | ```bash 98 | pip uninstall jupyterlite-ai 99 | ``` 100 | 101 | ## Contributing 102 | 103 | ### Development install 104 | 105 | Note: You will need NodeJS to build the extension package. 106 | 107 | The `jlpm` command is JupyterLab's pinned version of 108 | [yarn](https://yarnpkg.com/) that is installed with JupyterLab. You may use 109 | `yarn` or `npm` in lieu of `jlpm` below. 110 | 111 | ```bash 112 | # Clone the repo to your local environment 113 | # Change directory to the jupyterlite_ai directory 114 | # Install package in development mode 115 | pip install -e "." 116 | # Link your development version of the extension with JupyterLab 117 | jupyter labextension develop . --overwrite 118 | # Rebuild extension Typescript source after making changes 119 | jlpm build 120 | ``` 121 | 122 | You can watch the source directory and run JupyterLab at the same time in different terminals to watch for changes in the extension's source and automatically rebuild the extension. 123 | 124 | ```bash 125 | # Watch the source directory in one terminal, automatically rebuilding when needed 126 | jlpm watch 127 | # Run JupyterLab in another terminal 128 | jupyter lab 129 | ``` 130 | 131 | With the watch command running, every saved change will immediately be built locally and available in your running JupyterLab. Refresh JupyterLab to load the change in your browser (you may need to wait several seconds for the extension to be rebuilt). 132 | 133 | By default, the `jlpm build` command generates the source maps for this extension to make it easier to debug using the browser dev tools. 
To also generate source maps for the JupyterLab core extensions, you can run the following command: 134 | 135 | ```bash 136 | jupyter lab build --minimize=False 137 | ``` 138 | 139 | ### Development uninstall 140 | 141 | ```bash 142 | pip uninstall jupyterlite-ai 143 | ``` 144 | 145 | In development mode, you will also need to remove the symlink created by `jupyter labextension develop` 146 | command. To find its location, you can run `jupyter labextension list` to figure out where the `labextensions` 147 | folder is located. Then you can remove the symlink named `@jupyterlite/ai` within that folder. 148 | 149 | ### Packaging the extension 150 | 151 | See [RELEASE](RELEASE.md) 152 | -------------------------------------------------------------------------------- /RELEASE.md: -------------------------------------------------------------------------------- 1 | # Making a new release of jupyterlite_ai 2 | 3 | The extension can be published to `PyPI` and `npm` manually or using the [Jupyter Releaser](https://github.com/jupyter-server/jupyter_releaser). 4 | 5 | ## Manual release 6 | 7 | ### Python package 8 | 9 | This extension can be distributed as Python packages. All of the Python 10 | packaging instructions are in the `pyproject.toml` file to wrap your extension in a 11 | Python package. Before generating a package, you first need to install some tools: 12 | 13 | ```bash 14 | pip install build twine hatch 15 | ``` 16 | 17 | Bump the version using `hatch`. By default this will create a tag. 18 | See the docs on [hatch-nodejs-version](https://github.com/agoose77/hatch-nodejs-version#semver) for details. 
19 | 20 | ```bash 21 | hatch version 22 | ``` 23 | 24 | Make sure to clean up all the development files before building the package: 25 | 26 | ```bash 27 | jlpm clean:all 28 | ``` 29 | 30 | You could also clean up the local git repository: 31 | 32 | ```bash 33 | git clean -dfX 34 | ``` 35 | 36 | To create a Python source package (`.tar.gz`) and the binary package (`.whl`) in the `dist/` directory, do: 37 | 38 | ```bash 39 | python -m build 40 | ``` 41 | 42 | > `python setup.py sdist bdist_wheel` is deprecated and will not work for this package. 43 | 44 | Then to upload the package to PyPI, do: 45 | 46 | ```bash 47 | twine upload dist/* 48 | ``` 49 | 50 | ### NPM package 51 | 52 | To publish the frontend part of the extension as a NPM package, do: 53 | 54 | ```bash 55 | npm login 56 | npm publish --access public 57 | ``` 58 | 59 | ## Automated releases with the Jupyter Releaser 60 | 61 | The extension repository should already be compatible with the Jupyter Releaser. But 62 | the GitHub repository and the package managers need to be properly set up. Please 63 | follow the instructions of the Jupyter Releaser [checklist](https://jupyter-releaser.readthedocs.io/en/latest/how_to_guides/convert_repo_from_repo.html). 64 | 65 | Here is a summary of the steps to cut a new release: 66 | 67 | - Go to the Actions panel 68 | - Run the "Step 1: Prep Release" workflow 69 | - Check the draft changelog 70 | - Run the "Step 2: Publish Release" workflow 71 | 72 | > [!NOTE] 73 | > Check out the [workflow documentation](https://jupyter-releaser.readthedocs.io/en/latest/get_started/making_release_from_repo.html) 74 | > for more information. 75 | 76 | ## Publishing to `conda-forge` 77 | 78 | If the package is not on conda forge yet, check the documentation to learn how to add it: https://conda-forge.org/docs/maintainer/adding_pkgs.html 79 | 80 | Otherwise a bot should pick up the new version publish to PyPI, and open a new PR on the feedstock repository automatically. 
81 | -------------------------------------------------------------------------------- /img/1-api-key.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jupyterlite/ai/c519df67c75de363e7dac9526ed49c9ca158a6f3/img/1-api-key.png -------------------------------------------------------------------------------- /img/2-jupyterlab-settings.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jupyterlite/ai/c519df67c75de363e7dac9526ed49c9ca158a6f3/img/2-jupyterlab-settings.png -------------------------------------------------------------------------------- /img/3-usage.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jupyterlite/ai/c519df67c75de363e7dac9526ed49c9ca158a6f3/img/3-usage.png -------------------------------------------------------------------------------- /install.json: -------------------------------------------------------------------------------- 1 | { 2 | "packageManager": "python", 3 | "packageName": "jupyterlite_ai", 4 | "uninstallInstructions": "Use your Python package manager (pip, conda, etc.) to uninstall the package jupyterlite_ai" 5 | } 6 | -------------------------------------------------------------------------------- /jupyterlite_ai/__init__.py: -------------------------------------------------------------------------------- 1 | try: 2 | from ._version import __version__ 3 | except ImportError: 4 | # Fallback when using the package in dev mode without installing 5 | # in editable mode with pip. 
It is highly recommended to install 6 | # the package from a stable release or in editable mode: https://pip.pypa.io/en/stable/topics/local-project-installs/#editable-installs 7 | import warnings 8 | warnings.warn("Importing 'jupyterlite_ai' outside a proper installation.") 9 | __version__ = "dev" 10 | 11 | 12 | def _jupyter_labextension_paths(): 13 | return [{ 14 | "src": "labextension", 15 | "dest": "@jupyterlite/ai" 16 | }] 17 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@jupyterlite/ai", 3 | "version": "0.7.0", 4 | "description": "AI code completions and chat for JupyterLite", 5 | "keywords": [ 6 | "jupyter", 7 | "jupyterlab", 8 | "jupyterlab-extension" 9 | ], 10 | "homepage": "https://github.com/jupyterlite/ai", 11 | "bugs": { 12 | "url": "https://github.com/jupyterlite/ai/issues" 13 | }, 14 | "license": "BSD-3-Clause", 15 | "author": "JupyterLite Contributors", 16 | "files": [ 17 | "lib/**/*.{d.ts,eot,gif,html,jpg,js,js.map,json,png,svg,woff2,ttf,md}", 18 | "style/**/*.{css,js,eot,gif,html,jpg,json,png,svg,woff2,ttf}", 19 | "src/**/*.{ts,tsx}", 20 | "schema/*.json" 21 | ], 22 | "main": "lib/index.js", 23 | "types": "lib/index.d.ts", 24 | "style": "style/index.css", 25 | "repository": { 26 | "type": "git", 27 | "url": "https://github.com/jupyterlite/ai.git" 28 | }, 29 | "scripts": { 30 | "build": "jlpm build:lib && jlpm build:labextension:dev", 31 | "build:dev": "jlpm build:lib && jlpm build:labextension:dev", 32 | "build:prod": "jlpm settings:build && jlpm clean && jlpm build:lib:prod && jlpm build:labextension", 33 | "build:labextension": "jupyter labextension build .", 34 | "build:labextension:dev": "jupyter labextension build --development True .", 35 | "build:lib": "tsc --sourceMap", 36 | "build:lib:prod": "tsc", 37 | "clean": "jlpm clean:lib", 38 | "clean:lib": "rimraf lib tsconfig.tsbuildinfo", 39 | 
"clean:lintcache": "rimraf .eslintcache .stylelintcache", 40 | "clean:labextension": "rimraf jupyterlite_ai/labextension jupyterlite_ai/_version.py", 41 | "clean:all": "jlpm clean:lib && jlpm clean:labextension && jlpm clean:lintcache", 42 | "eslint": "jlpm eslint:check --fix", 43 | "eslint:check": "eslint . --cache --ext .ts,.tsx", 44 | "install:extension": "jlpm build", 45 | "lint": "jlpm stylelint && jlpm prettier && jlpm eslint", 46 | "lint:check": "jlpm stylelint:check && jlpm prettier:check && jlpm eslint:check", 47 | "prettier": "jlpm prettier:base --write --list-different", 48 | "prettier:base": "prettier \"**/*{.ts,.tsx,.js,.jsx,.css,.json,.md}\"", 49 | "prettier:check": "jlpm prettier:base --check", 50 | "settings:build": "node ./scripts/settings-checker.js --generate", 51 | "settings:check": "node ./scripts/settings-checker.js", 52 | "stylelint": "jlpm stylelint:check --fix", 53 | "stylelint:check": "stylelint --cache \"style/**/*.css\"", 54 | "watch": "run-p watch:src watch:labextension", 55 | "watch:src": "tsc -w --sourceMap", 56 | "watch:labextension": "jupyter labextension watch ." 
57 | }, 58 | "dependencies": { 59 | "@jupyter/chat": "^0.12.0", 60 | "@jupyterlab/application": "^4.4.0", 61 | "@jupyterlab/apputils": "^4.5.0", 62 | "@jupyterlab/completer": "^4.4.0", 63 | "@jupyterlab/coreutils": "^6.4.0", 64 | "@jupyterlab/notebook": "^4.4.0", 65 | "@jupyterlab/rendermime": "^4.4.0", 66 | "@jupyterlab/settingregistry": "^4.4.0", 67 | "@jupyterlab/ui-components": "^4.4.0", 68 | "@langchain/anthropic": "^0.3.9", 69 | "@langchain/community": "^0.3.44", 70 | "@langchain/core": "^0.3.57", 71 | "@langchain/mistralai": "^0.1.1", 72 | "@langchain/ollama": "^0.2.0", 73 | "@langchain/openai": "^0.4.4", 74 | "@lumino/coreutils": "^2.1.2", 75 | "@lumino/polling": "^2.1.2", 76 | "@lumino/signaling": "^2.1.2", 77 | "@mlc-ai/web-llm": "^0.2.79", 78 | "@mlc-ai/web-runtime": "^0.18.0-dev2", 79 | "@mlc-ai/web-tokenizers": "^0.1.6", 80 | "@mui/icons-material": "^5.11.0", 81 | "@mui/material": "^5.11.0", 82 | "@rjsf/core": "^5.18.4", 83 | "@rjsf/utils": "^5.18.4", 84 | "@rjsf/validator-ajv8": "^5.18.4", 85 | "json5": "^2.2.3", 86 | "jupyter-secrets-manager": "^0.4.0", 87 | "react": "^18.2.0", 88 | "react-dom": "^18.2.0" 89 | }, 90 | "devDependencies": { 91 | "@jupyterlab/builder": "^4.4.0", 92 | "@stylistic/eslint-plugin": "^3.0.1", 93 | "@types/chrome": "^0.0.304", 94 | "@types/json-schema": "^7.0.11", 95 | "@types/react": "^18.0.26", 96 | "@types/react-addons-linked-state-mixin": "^0.14.22", 97 | "@typescript-eslint/eslint-plugin": "^6.1.0", 98 | "@typescript-eslint/parser": "^6.1.0", 99 | "@webgpu/types": "^0.1.54", 100 | "css-loader": "^6.7.1", 101 | "eslint": "^8.36.0", 102 | "eslint-config-prettier": "^8.8.0", 103 | "eslint-plugin-prettier": "^5.0.0", 104 | "npm-run-all": "^4.1.5", 105 | "prettier": "^3.0.0", 106 | "rimraf": "^5.0.1", 107 | "source-map-loader": "^1.0.2", 108 | "style-loader": "^3.3.1", 109 | "stylelint": "^15.10.1", 110 | "stylelint-config-recommended": "^13.0.0", 111 | "stylelint-config-standard": "^34.0.0", 112 | 
"stylelint-csstree-validator": "^3.0.0", 113 | "stylelint-prettier": "^4.0.0", 114 | "ts-json-schema-generator": "^2.4.0", 115 | "typescript": "~5.8.3", 116 | "yjs": "^13.5.0" 117 | }, 118 | "sideEffects": [ 119 | "style/*.css", 120 | "style/index.js" 121 | ], 122 | "styleModule": "style/index.js", 123 | "publishConfig": { 124 | "access": "public" 125 | }, 126 | "jupyterlab": { 127 | "extension": true, 128 | "outputDir": "jupyterlite_ai/labextension", 129 | "schemaDir": "schema" 130 | } 131 | } 132 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["hatchling>=1.5.0", "jupyterlab>=4.0.0,<5", "hatch-nodejs-version>=0.3.2"] 3 | build-backend = "hatchling.build" 4 | 5 | [project] 6 | name = "jupyterlite_ai" 7 | readme = "README.md" 8 | license = { file = "LICENSE" } 9 | requires-python = ">=3.8" 10 | classifiers = [ 11 | "Framework :: Jupyter", 12 | "Framework :: Jupyter :: JupyterLab", 13 | "Framework :: Jupyter :: JupyterLab :: 4", 14 | "Framework :: Jupyter :: JupyterLab :: Extensions", 15 | "Framework :: Jupyter :: JupyterLab :: Extensions :: Prebuilt", 16 | "License :: OSI Approved :: BSD License", 17 | "Programming Language :: Python", 18 | "Programming Language :: Python :: 3", 19 | "Programming Language :: Python :: 3.8", 20 | "Programming Language :: Python :: 3.9", 21 | "Programming Language :: Python :: 3.10", 22 | "Programming Language :: Python :: 3.11", 23 | "Programming Language :: Python :: 3.12", 24 | ] 25 | dependencies = [ 26 | "jupyter-secrets-manager >=0.4,<0.5" 27 | ] 28 | dynamic = ["version", "description", "authors", "urls", "keywords"] 29 | 30 | [project.optional-dependencies] 31 | jupyter = [ 32 | "jupyterlab>=4.4.0", 33 | "jupyterlite>=0.6.0a0", 34 | "notebook>=7.4.0" 35 | ] 36 | 37 | [tool.hatch.version] 38 | source = "nodejs" 39 | 40 | [tool.hatch.metadata.hooks.nodejs] 41 | 
fields = ["description", "authors", "urls"] 42 | 43 | [tool.hatch.build.targets.sdist] 44 | artifacts = ["jupyterlite_ai/labextension"] 45 | exclude = [".github", "binder"] 46 | 47 | [tool.hatch.build.targets.wheel.shared-data] 48 | "jupyterlite_ai/labextension" = "share/jupyter/labextensions/@jupyterlite/ai" 49 | "install.json" = "share/jupyter/labextensions/@jupyterlite/ai/install.json" 50 | 51 | [tool.hatch.build.hooks.version] 52 | path = "jupyterlite_ai/_version.py" 53 | 54 | [tool.hatch.build.hooks.jupyter-builder] 55 | dependencies = ["hatch-jupyter-builder>=0.5"] 56 | build-function = "hatch_jupyter_builder.npm_builder" 57 | ensured-targets = [ 58 | "jupyterlite_ai/labextension/static/style.js", 59 | "jupyterlite_ai/labextension/package.json", 60 | ] 61 | skip-if-exists = ["jupyterlite_ai/labextension/static/style.js"] 62 | 63 | [tool.hatch.build.hooks.jupyter-builder.build-kwargs] 64 | build_cmd = "build:prod" 65 | npm = ["jlpm"] 66 | 67 | [tool.hatch.build.hooks.jupyter-builder.editable-build-kwargs] 68 | build_cmd = "install:extension" 69 | npm = ["jlpm"] 70 | source_dir = "src" 71 | build_dir = "jupyterlite_ai/labextension" 72 | 73 | [tool.jupyter-releaser.options] 74 | version_cmd = "hatch version" 75 | 76 | [tool.jupyter-releaser.hooks] 77 | before-build-npm = [ 78 | "python -m pip install 'jupyterlab>=4.0.0,<5'", 79 | "jlpm", 80 | "jlpm build:prod" 81 | ] 82 | before-build-python = ["jlpm clean:all"] 83 | 84 | [tool.check-wheel-contents] 85 | ignore = ["W002"] 86 | -------------------------------------------------------------------------------- /schema/chat.json: -------------------------------------------------------------------------------- 1 | { 2 | "title": "Chat configuration", 3 | "description": "Configuration for the chat panel", 4 | "jupyter.lab.setting-icon": "jupyter-chat::chat", 5 | "jupyter.lab.setting-icon-label": "Jupyter Chat", 6 | "type": "object", 7 | "properties": { 8 | "sendWithShiftEnter": { 9 | "description": "Whether to send a 
message via Shift-Enter instead of Enter.", 10 | "type": "boolean", 11 | "default": false, 12 | "readOnly": false 13 | }, 14 | "enableCodeToolbar": { 15 | "description": "Whether to enable or not the code toolbar.", 16 | "type": "boolean", 17 | "default": true, 18 | "readOnly": false 19 | }, 20 | "personaName": { 21 | "type": "string", 22 | "title": "AI persona name", 23 | "description": "The name of the AI persona", 24 | "default": "Jupyternaut" 25 | } 26 | }, 27 | "additionalProperties": false 28 | } 29 | -------------------------------------------------------------------------------- /schema/provider-registry.json: -------------------------------------------------------------------------------- 1 | { 2 | "title": "AI provider", 3 | "description": "Provider registry settings", 4 | "jupyter.lab.setting-icon": "@jupyterlite/ai:jupyternaut-lite", 5 | "jupyter.lab.setting-icon-label": "JupyterLite AI Chat", 6 | "type": "object", 7 | "properties": { 8 | "UseSecretsManager": { 9 | "type": "boolean", 10 | "title": "Use secrets manager", 11 | "description": "Whether to use or not the secrets manager. 
If not, secrets will be stored in the browser (local storage)", 12 | "default": true 13 | }, 14 | "AIprovider": { 15 | "type": "object", 16 | "title": "AI provider", 17 | "description": "The AI provider configuration", 18 | "default": {}, 19 | "additionalProperties": true 20 | } 21 | }, 22 | "additionalProperties": false 23 | } 24 | -------------------------------------------------------------------------------- /scripts/settings-checker.js: -------------------------------------------------------------------------------- 1 | const fs = require('fs'); 2 | const tsj = require('ts-json-schema-generator'); 3 | const path = require('path'); 4 | 5 | const providersDir = 'src/default-providers'; 6 | 7 | let checkError = false; 8 | let generate = false; 9 | if (process.argv.length >= 3) { 10 | if (process.argv[2] === '--generate') { 11 | generate = true; 12 | } else { 13 | throw Error(`Argument '${process.argv[2]}' is not valid.`); 14 | } 15 | } 16 | 17 | if (generate) { 18 | console.log('Building settings schemas\n'); 19 | } else { 20 | console.log('Checking settings schemas\n'); 21 | } 22 | 23 | // Build the langchain BaseLanguageModelParams object 24 | const configBase = { 25 | path: 'node_modules/@langchain/core/dist/language_models/base.d.ts', 26 | tsconfig: './tsconfig.json', 27 | type: 'BaseLanguageModelParams' 28 | }; 29 | 30 | const schemaBase = tsj 31 | .createGenerator(configBase) 32 | .createSchema(configBase.type); 33 | 34 | /** 35 | * The providers are the list of providers for which we'd like to build settings from their interface. 36 | * The keys will be the names of the json files that will be linked to the selected provider. 37 | * The values are: 38 | * - path: path of the module containing the provider input description, in @langchain package. 39 | * - type: the type or interface to format to json settings. 40 | * - excludedProps: (optional) the properties to not include in the settings. 
41 | * "ts-json-schema-generator" seems to not handle some imported types, so the workaround is 42 | * to exclude them at the moment, to be able to build other settings. 43 | */ 44 | const providers = { 45 | Anthropic: { 46 | path: 'node_modules/@langchain/anthropic/dist/chat_models.d.ts', 47 | type: 'AnthropicInput', 48 | excludedProps: ['clientOptions'] 49 | }, 50 | ChromeAI: { 51 | path: 'node_modules/@langchain/community/experimental/llms/chrome_ai.d.ts', 52 | type: 'ChromeAIInputs' 53 | }, 54 | MistralAI: { 55 | path: 'node_modules/@langchain/mistralai/dist/chat_models.d.ts', 56 | type: 'ChatMistralAIInput' 57 | }, 58 | Ollama: { 59 | path: 'node_modules/@langchain/ollama/dist/chat_models.d.ts', 60 | type: 'ChatOllamaInput' 61 | }, 62 | OpenAI: { 63 | path: 'node_modules/@langchain/openai/dist/chat_models.d.ts', 64 | type: 'ChatOpenAIFields', 65 | excludedProps: ['configuration'] 66 | }, 67 | WebLLM: { 68 | path: 'node_modules/@langchain/community/chat_models/webllm.d.ts', 69 | type: 'WebLLMInputs', 70 | // TODO: re-enable? 71 | excludedProps: ['appConfig', 'chatOptions'] 72 | } 73 | }; 74 | 75 | Object.entries(providers).forEach(([name, desc], index) => { 76 | const outputDir = path.join(providersDir, name); 77 | const outputPath = path.join(outputDir, 'settings-schema.json'); 78 | if (!generate && !fs.existsSync(outputPath)) { 79 | throw Error(`${outputPath} does not exist`); 80 | } 81 | 82 | // The configuration doesn't include functions, which may probably not be filled 83 | // from the settings panel. 84 | const config = { 85 | path: desc.path, 86 | tsconfig: './tsconfig.json', 87 | type: desc.type, 88 | functions: 'hide', 89 | topRef: false 90 | }; 91 | 92 | // Skip for WebLLM due to ts-json-schema-generator not picking up the typeRoots? 
93 | if (name === 'WebLLM') { 94 | config.skipTypeCheck = true; 95 | } 96 | 97 | const generator = tsj.createGenerator(config); 98 | let schema; 99 | 100 | // Workaround to exclude some properties from a type or interface. 101 | if (desc.excludedProps) { 102 | const nodes = generator.getRootNodes(config.type); 103 | const finalMembers = []; 104 | nodes[0].members.forEach(member => { 105 | if (!desc.excludedProps.includes(member.symbol.escapedName)) { 106 | finalMembers.push(member); 107 | } 108 | }); 109 | nodes[0].members = finalMembers; 110 | schema = generator.createSchemaFromNodes(nodes); 111 | } else { 112 | schema = generator.createSchema(config.type); 113 | } 114 | 115 | if (!schema.definitions) { 116 | return; 117 | } 118 | 119 | // Remove the properties from extended class. 120 | const providerKeys = Object.keys(schema.properties); 121 | Object.keys( 122 | schemaBase.definitions?.['BaseLanguageModelParams']['properties'] 123 | ).forEach(key => { 124 | if (providerKeys.includes(key)) { 125 | delete schema.properties?.[key]; 126 | } 127 | }); 128 | 129 | // Replace all references by their value, and remove the useless definitions. 130 | const defKeys = Object.keys(schema.definitions); 131 | for (let i = defKeys.length - 1; i >= 0; i--) { 132 | let schemaString = JSON.stringify(schema); 133 | const key = defKeys[i]; 134 | const reference = `"$ref":"#/definitions/${key}"`; 135 | 136 | // Replace all the references to the definition by the content (after removal of the brace). 137 | const replacement = JSON.stringify(schema.definitions?.[key]).slice(1, -1); 138 | temporarySchemaString = schemaString.replaceAll(reference, replacement); 139 | // Build again the schema from the string representation if it change. 140 | if (schemaString !== temporarySchemaString) { 141 | schema = JSON.parse(temporarySchemaString); 142 | } 143 | // Remove the definition 144 | delete schema.definitions?.[key]; 145 | } 146 | 147 | // Transform the default values. 
148 | Object.values(schema.properties).forEach(value => { 149 | const defaultValue = value.default; 150 | if (!defaultValue) { 151 | return; 152 | } 153 | if (value.type === 'number') { 154 | value.default = Number(/{(.*)}/.exec(value.default)?.[1] ?? 0); 155 | } else if (value.type === 'boolean') { 156 | value.default = /{(.*)}/.exec(value.default)?.[1] === 'true'; 157 | } else if (value.type === 'string') { 158 | value.default = /{\"(.*)\"}/.exec(value.default)?.[1] ?? ''; 159 | } 160 | }); 161 | 162 | let schemaString = JSON.stringify(schema, null, 2); 163 | schemaString += '\n'; 164 | if (generate) { 165 | if (!fs.existsSync(outputDir)) { 166 | fs.mkdirSync(outputDir); 167 | } 168 | // Write JSON file. 169 | fs.writeFileSync(outputPath, schemaString, err => { 170 | if (err) { 171 | throw err; 172 | } 173 | }); 174 | } else { 175 | const currentContent = fs.readFileSync(outputPath, { encoding: 'utf-8' }); 176 | if (currentContent !== schemaString) { 177 | checkError = true; 178 | console.log(`\x1b[31mX \x1b[0m${name}`); 179 | } else { 180 | console.log(`\x1b[32m\u2713 \x1b[0m${name}`); 181 | } 182 | } 183 | }); 184 | 185 | if (generate) { 186 | console.log('Settings schemas built\n'); 187 | console.log('=====================\n'); 188 | } else if (checkError) { 189 | console.error('Please run "jlpm settings:build" to fix it.'); 190 | process.exit(1); 191 | } else { 192 | console.log('Settings schemas checked successfully\n'); 193 | } 194 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | __import__("setuptools").setup() 2 | -------------------------------------------------------------------------------- /src/base-completer.ts: -------------------------------------------------------------------------------- 1 | import { 2 | CompletionHandler, 3 | IInlineCompletionContext 4 | } from '@jupyterlab/completer'; 5 | import { 
ReadonlyPartialJSONObject } from '@lumino/coreutils'; 6 | 7 | export interface IBaseCompleter { 8 | /** 9 | * The completion prompt. 10 | */ 11 | prompt: string; 12 | 13 | /** 14 | * The function to fetch a new completion. 15 | */ 16 | requestCompletion?: () => void; 17 | 18 | /** 19 | * The fetch request for the LLM completer. 20 | */ 21 | fetch( 22 | request: CompletionHandler.IRequest, 23 | context: IInlineCompletionContext 24 | ): Promise; 25 | } 26 | 27 | /** 28 | * The namespace for the base completer. 29 | */ 30 | export namespace BaseCompleter { 31 | /** 32 | * The options for the constructor of a completer. 33 | */ 34 | export interface IOptions { 35 | settings: ReadonlyPartialJSONObject; 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /src/chat-handler.ts: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) Jupyter Development Team. 3 | * Distributed under the terms of the Modified BSD License. 4 | */ 5 | 6 | import { 7 | ChatCommand, 8 | AbstractChatContext, 9 | AbstractChatModel, 10 | IChatCommandProvider, 11 | IChatContext, 12 | IChatHistory, 13 | IChatMessage, 14 | IChatModel, 15 | IInputModel, 16 | INewMessage 17 | } from '@jupyter/chat'; 18 | import { 19 | AIMessage, 20 | HumanMessage, 21 | mergeMessageRuns, 22 | SystemMessage 23 | } from '@langchain/core/messages'; 24 | import { UUID } from '@lumino/coreutils'; 25 | 26 | import { jupyternautLiteIcon } from './icons'; 27 | import { chatSystemPrompt } from './provider'; 28 | import { IAIProviderRegistry } from './tokens'; 29 | import { AIChatModel } from './types/ai-model'; 30 | 31 | /** 32 | * The base64 encoded SVG string of the jupyternaut lite icon. 33 | * Encode so it can be passed as avatar_url to jupyter-chat. 
34 | */ 35 | const AI_AVATAR_BASE64 = btoa(jupyternautLiteIcon.svgstr); 36 | const AI_AVATAR = `data:image/svg+xml;base64,${AI_AVATAR_BASE64}`; 37 | 38 | export const welcomeMessage = (providers: string[]) => ` 39 | #### Ask JupyterLite AI 40 | 41 | 42 | The provider to use can be set in the settings editor, by selecting it from 43 | the _AI provider_ settings. 44 | 45 | The current providers that are available are _${providers.sort().join('_, _')}_. 46 | 47 | To clear the chat, you can use the \`/clear\` command from the chat input. 48 | `; 49 | 50 | export type ConnectionMessage = { 51 | type: 'connection'; 52 | client_id: string; 53 | }; 54 | 55 | export class ChatHandler extends AbstractChatModel { 56 | constructor(options: ChatHandler.IOptions) { 57 | super(options); 58 | this._providerRegistry = options.providerRegistry; 59 | this._prompt = chatSystemPrompt({ 60 | provider_name: this._providerRegistry.currentName 61 | }); 62 | 63 | this._providerRegistry.providerChanged.connect(() => { 64 | this._errorMessage = this._providerRegistry.chatError; 65 | this._prompt = chatSystemPrompt({ 66 | provider_name: this._providerRegistry.currentName 67 | }); 68 | }); 69 | } 70 | 71 | get provider(): AIChatModel | null { 72 | return this._providerRegistry.currentChatModel; 73 | } 74 | 75 | /** 76 | * Getter and setter for the persona name. 77 | */ 78 | get personaName(): string { 79 | return this._personaName; 80 | } 81 | set personaName(value: string) { 82 | this.messages.forEach(message => { 83 | if (message.sender.username === this._personaName) { 84 | const updated: IChatMessage = { ...message }; 85 | updated.sender.username = value; 86 | this.messageAdded(updated); 87 | } 88 | }); 89 | this._personaName = value; 90 | } 91 | 92 | /** 93 | * Getter and setter for the initial prompt. 
94 | */ 95 | get prompt(): string { 96 | return this._prompt; 97 | } 98 | set prompt(value: string) { 99 | this._prompt = value; 100 | } 101 | 102 | async sendMessage(message: INewMessage): Promise { 103 | const body = message.body; 104 | if (body.startsWith('/clear')) { 105 | // TODO: do we need a clear method? 106 | this.messagesDeleted(0, this.messages.length); 107 | this._history.messages = []; 108 | return false; 109 | } 110 | message.id = UUID.uuid4(); 111 | const msg: IChatMessage = { 112 | id: message.id, 113 | body, 114 | sender: { username: 'User' }, 115 | time: Date.now(), 116 | type: 'msg' 117 | }; 118 | this.messageAdded(msg); 119 | 120 | if (this._providerRegistry.currentChatModel === null) { 121 | const errorMsg: IChatMessage = { 122 | id: UUID.uuid4(), 123 | body: `**${this._errorMessage ? this._errorMessage : this._defaultErrorMessage}**`, 124 | sender: { username: 'ERROR' }, 125 | time: Date.now(), 126 | type: 'msg' 127 | }; 128 | this.messageAdded(errorMsg); 129 | return false; 130 | } 131 | 132 | this._history.messages.push(msg); 133 | 134 | const messages = mergeMessageRuns([new SystemMessage(this._prompt)]); 135 | messages.push( 136 | ...this._history.messages.map(msg => { 137 | if (msg.sender.username === 'User') { 138 | return new HumanMessage(msg.body); 139 | } 140 | return new AIMessage(msg.body); 141 | }) 142 | ); 143 | 144 | const sender = { username: this._personaName, avatar_url: AI_AVATAR }; 145 | this.updateWriters([{ user: sender }]); 146 | 147 | // create an empty message to be filled by the AI provider 148 | const botMsg: IChatMessage = { 149 | id: UUID.uuid4(), 150 | body: '', 151 | sender, 152 | time: Date.now(), 153 | type: 'msg' 154 | }; 155 | 156 | let content = ''; 157 | 158 | this._controller = new AbortController(); 159 | try { 160 | for await (const chunk of await this._providerRegistry.currentChatModel.stream( 161 | messages, 162 | { signal: this._controller.signal } 163 | )) { 164 | content += chunk.content ?? 
chunk; 165 | botMsg.body = content; 166 | this.messageAdded(botMsg); 167 | } 168 | this._history.messages.push(botMsg); 169 | return true; 170 | } catch (reason) { 171 | const error = this._providerRegistry.formatErrorMessage(reason); 172 | const errorMsg: IChatMessage = { 173 | id: UUID.uuid4(), 174 | body: `**${error}**`, 175 | sender: { username: 'ERROR' }, 176 | time: Date.now(), 177 | type: 'msg' 178 | }; 179 | this.messageAdded(errorMsg); 180 | return false; 181 | } finally { 182 | this.updateWriters([]); 183 | this._controller = null; 184 | } 185 | } 186 | 187 | async getHistory(): Promise { 188 | return this._history; 189 | } 190 | 191 | dispose(): void { 192 | super.dispose(); 193 | } 194 | 195 | messageAdded(message: IChatMessage): void { 196 | super.messageAdded(message); 197 | } 198 | 199 | stopStreaming(): void { 200 | this._controller?.abort(); 201 | } 202 | 203 | createChatContext(): IChatContext { 204 | return new ChatHandler.ChatContext({ model: this }); 205 | } 206 | 207 | private _providerRegistry: IAIProviderRegistry; 208 | private _personaName = 'AI'; 209 | private _prompt: string; 210 | private _errorMessage: string = ''; 211 | private _history: IChatHistory = { messages: [] }; 212 | private _defaultErrorMessage = 'AI provider not configured'; 213 | private _controller: AbortController | null = null; 214 | } 215 | 216 | export namespace ChatHandler { 217 | /** 218 | * The options used to create a chat handler. 219 | */ 220 | export interface IOptions extends IChatModel.IOptions { 221 | providerRegistry: IAIProviderRegistry; 222 | } 223 | 224 | /** 225 | * The minimal chat context. 226 | */ 227 | export class ChatContext extends AbstractChatContext { 228 | users = []; 229 | } 230 | 231 | /** 232 | * The chat command provider for the chat. 
233 | */ 234 | export class ClearCommandProvider implements IChatCommandProvider { 235 | public id: string = '@jupyterlite/ai:clear-commands'; 236 | private _slash_commands: ChatCommand[] = [ 237 | { 238 | name: '/clear', 239 | providerId: this.id, 240 | replaceWith: '/clear', 241 | description: 'Clear the chat' 242 | } 243 | ]; 244 | async getChatCommands(inputModel: IInputModel) { 245 | const match = inputModel.currentWord?.match(/^\/\w*/)?.[0]; 246 | if (!match) { 247 | return []; 248 | } 249 | 250 | const commands = this._slash_commands.filter(cmd => 251 | cmd.name.startsWith(match) 252 | ); 253 | return commands; 254 | } 255 | 256 | async handleChatCommand( 257 | command: ChatCommand, 258 | inputModel: IInputModel 259 | ): Promise { 260 | // no handling needed because `replaceWith` is set in each command. 261 | return; 262 | } 263 | } 264 | } 265 | -------------------------------------------------------------------------------- /src/completion-provider.ts: -------------------------------------------------------------------------------- 1 | import { 2 | CompletionHandler, 3 | IInlineCompletionContext, 4 | IInlineCompletionProvider 5 | } from '@jupyterlab/completer'; 6 | 7 | import { IAIProviderRegistry } from './tokens'; 8 | import { AICompleter } from './types/ai-model'; 9 | 10 | /** 11 | * The generic completion provider to register to the completion provider manager. 12 | */ 13 | export class CompletionProvider implements IInlineCompletionProvider { 14 | readonly identifier = '@jupyterlite/ai'; 15 | 16 | constructor(options: CompletionProvider.IOptions) { 17 | this._providerRegistry = options.providerRegistry; 18 | this._requestCompletion = options.requestCompletion; 19 | 20 | this._providerRegistry.providerChanged.connect(() => { 21 | if (this.completer) { 22 | this.completer.requestCompletion = this._requestCompletion; 23 | } 24 | }); 25 | } 26 | 27 | /** 28 | * Get the current completer name. 
29 | */ 30 | get name(): string { 31 | return this._providerRegistry.currentName; 32 | } 33 | 34 | /** 35 | * Get the current completer. 36 | */ 37 | get completer(): AICompleter | null { 38 | return this._providerRegistry.currentCompleter; 39 | } 40 | 41 | async fetch( 42 | request: CompletionHandler.IRequest, 43 | context: IInlineCompletionContext 44 | ) { 45 | return this.completer?.fetch(request, context); 46 | } 47 | 48 | private _providerRegistry: IAIProviderRegistry; 49 | private _requestCompletion: () => void; 50 | } 51 | 52 | export namespace CompletionProvider { 53 | export interface IOptions { 54 | /** 55 | * The registry where the completion provider belongs. 56 | */ 57 | providerRegistry: IAIProviderRegistry; 58 | /** 59 | * The request completion commands, can be useful if a provider needs to request 60 | * the completion by itself. 61 | */ 62 | requestCompletion: () => void; 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /src/components/stop-button.tsx: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) Jupyter Development Team. 3 | * Distributed under the terms of the Modified BSD License. 4 | */ 5 | 6 | import StopIcon from '@mui/icons-material/Stop'; 7 | import React from 'react'; 8 | 9 | import { InputToolbarRegistry, TooltippedButton } from '@jupyter/chat'; 10 | 11 | /** 12 | * Properties of the stop button. 13 | */ 14 | export interface IStopButtonProps 15 | extends InputToolbarRegistry.IToolbarItemProps { 16 | /** 17 | * The function to stop streaming. 18 | */ 19 | stopStreaming: () => void; 20 | } 21 | 22 | /** 23 | * The stop button. 24 | */ 25 | export function StopButton(props: IStopButtonProps): JSX.Element { 26 | const tooltip = 'Stop streaming'; 27 | return ( 28 | 37 | 38 | 39 | ); 40 | } 41 | 42 | /** 43 | * factory returning the toolbar item. 
44 | */ 45 | export function stopItem( 46 | stopStreaming: () => void 47 | ): InputToolbarRegistry.IToolbarItem { 48 | return { 49 | element: (props: InputToolbarRegistry.IToolbarItemProps) => { 50 | const stopProps: IStopButtonProps = { ...props, stopStreaming }; 51 | return StopButton(stopProps); 52 | }, 53 | position: 50, 54 | hidden: true /* hidden by default */ 55 | }; 56 | } 57 | -------------------------------------------------------------------------------- /src/default-providers/Anthropic/completer.ts: -------------------------------------------------------------------------------- 1 | import { 2 | CompletionHandler, 3 | IInlineCompletionContext 4 | } from '@jupyterlab/completer'; 5 | import { ChatAnthropic } from '@langchain/anthropic'; 6 | import { AIMessage, SystemMessage } from '@langchain/core/messages'; 7 | 8 | import { BaseCompleter, IBaseCompleter } from '../../base-completer'; 9 | import { COMPLETION_SYSTEM_PROMPT } from '../../provider'; 10 | 11 | export class AnthropicCompleter implements IBaseCompleter { 12 | constructor(options: BaseCompleter.IOptions) { 13 | this._completer = new ChatAnthropic({ ...options.settings }); 14 | } 15 | 16 | /** 17 | * Getter and setter for the initial prompt. 
18 | */ 19 | get prompt(): string { 20 | return this._prompt; 21 | } 22 | set prompt(value: string) { 23 | this._prompt = value; 24 | } 25 | 26 | async fetch( 27 | request: CompletionHandler.IRequest, 28 | context: IInlineCompletionContext 29 | ) { 30 | const { text, offset: cursorOffset } = request; 31 | const prompt = text.slice(0, cursorOffset); 32 | 33 | // Anthropic does not allow whitespace at the end of the AIMessage 34 | const trimmedPrompt = prompt.trim(); 35 | 36 | const messages = [ 37 | new SystemMessage(this._prompt), 38 | new AIMessage(trimmedPrompt) 39 | ]; 40 | 41 | try { 42 | const response = await this._completer.invoke(messages); 43 | const items = []; 44 | 45 | // Anthropic can return string or complex content, a list of string/images/other. 46 | if (typeof response.content === 'string') { 47 | items.push({ 48 | insertText: response.content 49 | }); 50 | } else { 51 | response.content.forEach(content => { 52 | if (content.type !== 'text') { 53 | return; 54 | } 55 | items.push({ 56 | insertText: content.text, 57 | filterText: prompt.substring(trimmedPrompt.length) 58 | }); 59 | }); 60 | } 61 | return { items }; 62 | } catch (error) { 63 | console.error('Error fetching completions', error); 64 | return { items: [] }; 65 | } 66 | } 67 | 68 | private _completer: ChatAnthropic; 69 | private _prompt: string = COMPLETION_SYSTEM_PROMPT; 70 | } 71 | -------------------------------------------------------------------------------- /src/default-providers/Anthropic/settings-schema.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "http://json-schema.org/draft-07/schema#", 3 | "type": "object", 4 | "properties": { 5 | "temperature": { 6 | "type": "number", 7 | "description": "Amount of randomness injected into the response. Ranges from 0 to 1. Use temp closer to 0 for analytical / multiple choice, and temp closer to 1 for creative and generative tasks." 
8 | }, 9 | "topK": { 10 | "type": "number", 11 | "description": "Only sample from the top K options for each subsequent token. Used to remove \"long tail\" low probability responses. Defaults to -1, which disables it." 12 | }, 13 | "topP": { 14 | "type": "number", 15 | "description": "Does nucleus sampling, in which we compute the cumulative distribution over all the options for each subsequent token in decreasing probability order and cut it off once it reaches a particular probability specified by top_p. Defaults to -1, which disables it. Note that you should either alter temperature or top_p, but not both." 16 | }, 17 | "maxTokens": { 18 | "type": "number", 19 | "description": "A maximum number of tokens to generate before stopping." 20 | }, 21 | "maxTokensToSample": { 22 | "type": "number", 23 | "description": "A maximum number of tokens to generate before stopping.", 24 | "deprecated": "Use \"maxTokens\" instead." 25 | }, 26 | "stopSequences": { 27 | "type": "array", 28 | "items": { 29 | "type": "string" 30 | }, 31 | "description": "A list of strings upon which to stop generating. You probably want `[\"\\n\\nHuman:\"]`, as that's the cue for the next turn in the dialog agent." 
32 | }, 33 | "streaming": { 34 | "type": "boolean", 35 | "description": "Whether to stream the results or not" 36 | }, 37 | "anthropicApiKey": { 38 | "type": "string", 39 | "description": "Anthropic API key" 40 | }, 41 | "apiKey": { 42 | "type": "string", 43 | "description": "Anthropic API key" 44 | }, 45 | "anthropicApiUrl": { 46 | "type": "string", 47 | "description": "Anthropic API URL" 48 | }, 49 | "modelName": { 50 | "type": "string", 51 | "deprecated": "Use \"model\" instead" 52 | }, 53 | "model": { 54 | "type": "string", 55 | "description": "Model name to use" 56 | }, 57 | "invocationKwargs": { 58 | "type": "object", 59 | "description": "Holds any additional parameters that are valid to pass to {@link * https://console.anthropic.com/docs/api/reference | } * `anthropic.messages`} that are not explicitly specified on this class." 60 | }, 61 | "streamUsage": { 62 | "type": "boolean", 63 | "description": "Whether or not to include token usage data in streamed chunks.", 64 | "default": false 65 | } 66 | }, 67 | "additionalProperties": false, 68 | "description": "Input to AnthropicChat class.", 69 | "definitions": {} 70 | } 71 | -------------------------------------------------------------------------------- /src/default-providers/ChromeAI/completer.ts: -------------------------------------------------------------------------------- 1 | import { 2 | CompletionHandler, 3 | IInlineCompletionContext 4 | } from '@jupyterlab/completer'; 5 | import { ChromeAI } from '@langchain/community/experimental/llms/chrome_ai'; 6 | import { HumanMessage, SystemMessage } from '@langchain/core/messages'; 7 | 8 | import { BaseCompleter, IBaseCompleter } from '../../base-completer'; 9 | import { COMPLETION_SYSTEM_PROMPT } from '../../provider'; 10 | 11 | /** 12 | * Regular expression to match the '```' string at the start of a string. 13 | * So the completions returned by the LLM can still be kept after removing the code block formatting. 
14 | * 15 | * For example, if the response contains the following content after typing `import pandas`: 16 | * 17 | * ```python 18 | * as pd 19 | * ``` 20 | * 21 | * The formatting string after removing the code block delimiters will be: 22 | * 23 | * as pd 24 | */ 25 | const CODE_BLOCK_START_REGEX = /^```(?:[a-zA-Z]+)?\n?/; 26 | 27 | /** 28 | * Regular expression to match the '```' string at the end of a string. 29 | */ 30 | const CODE_BLOCK_END_REGEX = /```$/; 31 | 32 | export class ChromeCompleter implements IBaseCompleter { 33 | constructor(options: BaseCompleter.IOptions) { 34 | this._completer = new ChromeAI({ ...options.settings }); 35 | } 36 | 37 | /** 38 | * Getter and setter for the initial prompt. 39 | */ 40 | get prompt(): string { 41 | return this._prompt; 42 | } 43 | set prompt(value: string) { 44 | this._prompt = value; 45 | } 46 | 47 | async fetch( 48 | request: CompletionHandler.IRequest, 49 | context: IInlineCompletionContext 50 | ) { 51 | const { text, offset: cursorOffset } = request; 52 | const prompt = text.slice(0, cursorOffset); 53 | 54 | const trimmedPrompt = prompt.trim(); 55 | 56 | const messages = [ 57 | new SystemMessage(this._prompt), 58 | new HumanMessage(trimmedPrompt) 59 | ]; 60 | 61 | try { 62 | let response = await this._completer.invoke(messages); 63 | 64 | // ChromeAI sometimes returns a string starting with '```', 65 | // so process the response to remove the code block delimiters 66 | if (CODE_BLOCK_START_REGEX.test(response)) { 67 | response = response 68 | .replace(CODE_BLOCK_START_REGEX, '') 69 | .replace(CODE_BLOCK_END_REGEX, ''); 70 | } 71 | 72 | const items = [{ insertText: response }]; 73 | return { 74 | items 75 | }; 76 | } catch (error) { 77 | console.error('Error fetching completion:', error); 78 | return { items: [] }; 79 | } 80 | } 81 | 82 | private _completer: ChromeAI; 83 | private _prompt: string = COMPLETION_SYSTEM_PROMPT; 84 | } 85 | 
-------------------------------------------------------------------------------- /src/default-providers/ChromeAI/instructions.ts: -------------------------------------------------------------------------------- 1 | export default ` 2 | Support for ChromeAI is still experimental and only available in Google Chrome. 3 | 4 | You can test ChromeAI is enabled in your browser by going to the following URL: 5 | 6 | Enable the proper flags in Google Chrome. 7 | 8 | - chrome://flags/#prompt-api-for-gemini-nano 9 | - Select: \`Enabled\` 10 | - chrome://flags/#optimization-guide-on-device-model 11 | - Select: \`Enabled BypassPrefRequirement\` 12 | - chrome://components 13 | - Click \`Check for Update\` on Optimization Guide On Device Model to download the model 14 | - [Optional] chrome://flags/#text-safety-classifier 15 | 16 | A screenshot showing how to enable the ChromeAI flag in Google Chrome 17 | 18 | Then restart Chrome for these changes to take effect. 19 | 20 | On first use, Chrome will download the on-device model, which can be as large as 22GB (according to their docs and at the time of writing). 21 | During the download, ChromeAI may not be available via the extension. 22 | 23 | For more information about Chrome Built-in AI: 24 | `; 25 | 26 | /** 27 | * Check if the browser supports ChromeAI and the model is available. 28 | */ 29 | export async function compatibilityCheck(): Promise { 30 | // Check if the browser supports the ChromeAI model 31 | if ( 32 | typeof window === 'undefined' || 33 | !('LanguageModel' in window) || 34 | window.LanguageModel === undefined || 35 | (window.LanguageModel as any).availability === undefined 36 | ) { 37 | return 'Your browser does not support ChromeAI. 
Please use an updated chrome based browser like Google Chrome, and follow the instructions in settings to enable it.'; 38 | } 39 | const languageModel = window.LanguageModel as any; 40 | if (!(await languageModel.availability())) { 41 | return 'The ChromeAI model is not available in your browser. Please ensure you have enabled the necessary flags in Google Chrome as described in the instructions in settings.'; 42 | } 43 | // If the model is available, return null to indicate compatibility 44 | return null; 45 | } 46 | -------------------------------------------------------------------------------- /src/default-providers/ChromeAI/settings-schema.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "http://json-schema.org/draft-07/schema#", 3 | "type": "object", 4 | "properties": { 5 | "concurrency": { 6 | "type": "number", 7 | "deprecated": "Use `maxConcurrency` instead" 8 | }, 9 | "topK": { 10 | "type": "number" 11 | }, 12 | "temperature": { 13 | "type": "number" 14 | }, 15 | "systemPrompt": { 16 | "type": "string" 17 | } 18 | }, 19 | "additionalProperties": false, 20 | "definitions": {} 21 | } 22 | -------------------------------------------------------------------------------- /src/default-providers/MistralAI/completer.ts: -------------------------------------------------------------------------------- 1 | import { 2 | CompletionHandler, 3 | IInlineCompletionContext 4 | } from '@jupyterlab/completer'; 5 | import { 6 | BaseMessage, 7 | HumanMessage, 8 | SystemMessage 9 | } from '@langchain/core/messages'; 10 | import { ChatMistralAI } from '@langchain/mistralai'; 11 | import { Throttler } from '@lumino/polling'; 12 | 13 | import { BaseCompleter, IBaseCompleter } from '../../base-completer'; 14 | import { COMPLETION_SYSTEM_PROMPT } from '../../provider'; 15 | 16 | /** 17 | * The Mistral API has a rate limit of 1 request per second 18 | */ 19 | const INTERVAL = 1000; 20 | 21 | export class CodestralCompleter 
implements IBaseCompleter { 22 | constructor(options: BaseCompleter.IOptions) { 23 | this._completer = new ChatMistralAI({ ...options.settings }); 24 | this._throttler = new Throttler( 25 | async (messages: BaseMessage[]) => { 26 | const response = await this._completer.invoke(messages); 27 | // Extract results of completion request. 28 | const items = []; 29 | if (typeof response.content === 'string') { 30 | items.push({ 31 | insertText: response.content 32 | }); 33 | } else { 34 | response.content.forEach(content => { 35 | if (content.type !== 'text') { 36 | return; 37 | } 38 | items.push({ 39 | insertText: content.text 40 | }); 41 | }); 42 | } 43 | return { items }; 44 | }, 45 | { limit: INTERVAL } 46 | ); 47 | } 48 | 49 | /** 50 | * Getter and setter for the initial prompt. 51 | */ 52 | get prompt(): string { 53 | return this._prompt; 54 | } 55 | set prompt(value: string) { 56 | this._prompt = value; 57 | } 58 | 59 | async fetch( 60 | request: CompletionHandler.IRequest, 61 | context: IInlineCompletionContext 62 | ) { 63 | const { text, offset: cursorOffset } = request; 64 | const prompt = text.slice(0, cursorOffset); 65 | 66 | const messages: BaseMessage[] = [ 67 | new SystemMessage(this._prompt), 68 | new HumanMessage(prompt) 69 | ]; 70 | 71 | try { 72 | return await this._throttler.invoke(messages); 73 | } catch (error) { 74 | console.error('Error fetching completions', error); 75 | return { items: [] }; 76 | } 77 | } 78 | 79 | private _throttler: Throttler; 80 | private _completer: ChatMistralAI; 81 | private _prompt: string = COMPLETION_SYSTEM_PROMPT; 82 | } 83 | -------------------------------------------------------------------------------- /src/default-providers/MistralAI/instructions.ts: -------------------------------------------------------------------------------- 1 | export default ` 2 | This extension is still very much experimental. It is not an official MistralAI extension. 3 | 4 | 1. Go to and create an API key. 
5 | 6 | Screenshot showing how to create an API key 7 | 8 | 2. Open the JupyterLab settings and go to the **Ai providers** section to select the \`MistralAI\` 9 | provider and the API key (required). 10 | 11 | Screenshot showing how to add the API key to the settings 12 | 13 | 3. Open the chat, or use the inline completer 14 | 15 | Screenshot showing how to use the chat 16 | `; 17 | -------------------------------------------------------------------------------- /src/default-providers/MistralAI/settings-schema.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "http://json-schema.org/draft-07/schema#", 3 | "type": "object", 4 | "properties": { 5 | "streamUsage": { 6 | "type": "boolean", 7 | "description": "Whether or not to include token usage in the stream.", 8 | "default": true 9 | }, 10 | "disableStreaming": { 11 | "type": "boolean", 12 | "description": "Whether to disable streaming.\n\nIf streaming is bypassed, then `stream()` will defer to `invoke()`.\n\n- If true, will always bypass streaming case.\n- If false (default), will always use streaming case if available." 13 | }, 14 | "apiKey": { 15 | "type": "string", 16 | "description": "The API key to use.", 17 | "default": "" 18 | }, 19 | "modelName": { 20 | "type": "string", 21 | "description": "The name of the model to use. Alias for `model`", 22 | "default": "mistral-small-latest" 23 | }, 24 | "model": { 25 | "type": "string", 26 | "description": "The name of the model to use.", 27 | "default": "mistral-small-latest" 28 | }, 29 | "endpoint": { 30 | "type": "string", 31 | "description": "Override the default endpoint." 32 | }, 33 | "temperature": { 34 | "type": "number", 35 | "description": "What sampling temperature to use, between 0.0 and 2.0. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.", 36 | "default": 0.7 37 | }, 38 | "topP": { 39 | "type": "number", 40 | "description": "Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. Should be between 0 and 1.", 41 | "default": 1 42 | }, 43 | "maxTokens": { 44 | "type": "number", 45 | "description": "The maximum number of tokens to generate in the completion. The token count of your prompt plus max_tokens cannot exceed the model's context length." 46 | }, 47 | "streaming": { 48 | "type": "boolean", 49 | "description": "Whether or not to stream the response.", 50 | "default": false 51 | }, 52 | "safeMode": { 53 | "type": "boolean", 54 | "description": "Whether to inject a safety prompt before all conversations.", 55 | "default": false, 56 | "deprecated": "use safePrompt instead" 57 | }, 58 | "safePrompt": { 59 | "type": "boolean", 60 | "description": "Whether to inject a safety prompt before all conversations.", 61 | "default": false 62 | }, 63 | "randomSeed": { 64 | "type": "number", 65 | "description": "The seed to use for random sampling. If set, different calls will generate deterministic results. Alias for `seed`" 66 | }, 67 | "seed": { 68 | "type": "number", 69 | "description": "The seed to use for random sampling. If set, different calls will generate deterministic results." 
70 | } 71 | }, 72 | "additionalProperties": false, 73 | "description": "Input to chat model class.", 74 | "definitions": {} 75 | } 76 | -------------------------------------------------------------------------------- /src/default-providers/Ollama/completer.ts: -------------------------------------------------------------------------------- 1 | import { 2 | CompletionHandler, 3 | IInlineCompletionContext 4 | } from '@jupyterlab/completer'; 5 | import { AIMessage, SystemMessage } from '@langchain/core/messages'; 6 | import { ChatOllama } from '@langchain/ollama'; 7 | 8 | import { BaseCompleter, IBaseCompleter } from '../../base-completer'; 9 | import { COMPLETION_SYSTEM_PROMPT } from '../../provider'; 10 | 11 | export class OllamaCompleter implements IBaseCompleter { 12 | constructor(options: BaseCompleter.IOptions) { 13 | this._completer = new ChatOllama({ ...options.settings }); 14 | } 15 | 16 | /** 17 | * Getter and setter for the initial prompt. 18 | */ 19 | get prompt(): string { 20 | return this._prompt; 21 | } 22 | set prompt(value: string) { 23 | this._prompt = value; 24 | } 25 | 26 | async fetch( 27 | request: CompletionHandler.IRequest, 28 | context: IInlineCompletionContext 29 | ) { 30 | const { text, offset: cursorOffset } = request; 31 | const prompt = text.slice(0, cursorOffset); 32 | 33 | const messages = [new SystemMessage(this._prompt), new AIMessage(prompt)]; 34 | 35 | try { 36 | const response = await this._completer.invoke(messages); 37 | const items = []; 38 | if (typeof response.content === 'string') { 39 | items.push({ 40 | insertText: response.content 41 | }); 42 | } else { 43 | response.content.forEach(content => { 44 | if (content.type !== 'text') { 45 | return; 46 | } 47 | items.push({ 48 | insertText: content.text, 49 | filterText: prompt.substring(prompt.length) 50 | }); 51 | }); 52 | } 53 | return { items }; 54 | } catch (error) { 55 | console.error('Error fetching completions', error); 56 | return { items: [] }; 57 | } 58 | } 59 | 60 | 
private _completer: ChatOllama; 61 | private _prompt: string = COMPLETION_SYSTEM_PROMPT; 62 | } 63 | -------------------------------------------------------------------------------- /src/default-providers/Ollama/instructions.ts: -------------------------------------------------------------------------------- 1 | export default ` 2 | Ollama allows you to run large language models locally on your machine. 3 | To use it you need to install the Ollama CLI and pull the model you want to use. 4 | 5 | 1. Install the Ollama CLI by following the instructions at https://ollama.com/download 6 | 7 | 2. Pull the model you want to use by running the following command in your terminal: 8 | 9 | \`\`\`bash 10 | ollama pull <model-name> 11 | \`\`\` 12 | 13 | For example, to pull the Llama 2 model, run: 14 | 15 | \`\`\`bash 16 | ollama pull llama2 17 | \`\`\` 18 | 19 | 3. Once the model is pulled, you can use it in your application by running the following command: 20 | 21 | \`\`\`bash 22 | ollama serve 23 | \`\`\` 24 | 25 | 4. This model will be available in the extension, using the model name you used in the command above. 26 | 27 |
28 | Deploying Lite/Lab on external server 29 | 30 | See https://objectgraph.com/blog/ollama-cors/ for more details. 31 | 32 | On Linux, you can run the following commands: 33 | 34 | 1. Check if CORS is enabled on the server. You can do this by running the following command in your terminal: 35 | 36 | \`\`\`bash 37 | curl -X OPTIONS http://localhost:11434 -H "Origin: http://example.com" -H "Access-Control-Request-Method: GET" -I 38 | \`\`\` 39 | 40 | If CORS is disabled, you will see a response like this: 41 | 42 | \`\`\`bash 43 | HTTP/1.1 403 Forbidden 44 | Date: Wed, 09 Oct 2024 10:12:15 GMT 45 | Content-Length: 0 46 | \`\`\` 47 | 48 | 2. If CORS is not enabled, update _/etc/systemd/system/ollama.service_ with: 49 | 50 | \`\`\`bash 51 | [Service] 52 | Environment="OLLAMA_HOST=0.0.0.0" 53 | Environment="OLLAMA_ORIGINS=*" 54 | \`\`\` 55 | 56 | 3. Restart the service: 57 | 58 | \`\`\`bash 59 | sudo systemctl daemon-reload 60 | sudo systemctl restart ollama 61 | \`\`\` 62 | 63 | 4. Check if CORS is enabled on the server again by running the following command in your terminal: 64 | 65 | \`\`\`bash 66 | curl -X OPTIONS http://localhost:11434 -H "Origin: http://example.com" -H "Access-Control-Request-Method: GET" -I 67 | \`\`\` 68 | 69 |
70 | `; 71 | -------------------------------------------------------------------------------- /src/default-providers/Ollama/settings-schema.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "http://json-schema.org/draft-07/schema#", 3 | "type": "object", 4 | "properties": { 5 | "numa": { 6 | "type": "boolean" 7 | }, 8 | "numCtx": { 9 | "type": "number" 10 | }, 11 | "numBatch": { 12 | "type": "number" 13 | }, 14 | "numGpu": { 15 | "type": "number" 16 | }, 17 | "mainGpu": { 18 | "type": "number" 19 | }, 20 | "lowVram": { 21 | "type": "boolean" 22 | }, 23 | "f16Kv": { 24 | "type": "boolean" 25 | }, 26 | "logitsAll": { 27 | "type": "boolean" 28 | }, 29 | "vocabOnly": { 30 | "type": "boolean" 31 | }, 32 | "useMmap": { 33 | "type": "boolean" 34 | }, 35 | "useMlock": { 36 | "type": "boolean" 37 | }, 38 | "embeddingOnly": { 39 | "type": "boolean" 40 | }, 41 | "numThread": { 42 | "type": "number" 43 | }, 44 | "numKeep": { 45 | "type": "number" 46 | }, 47 | "seed": { 48 | "type": "number" 49 | }, 50 | "numPredict": { 51 | "type": "number" 52 | }, 53 | "topK": { 54 | "type": "number" 55 | }, 56 | "topP": { 57 | "type": "number" 58 | }, 59 | "tfsZ": { 60 | "type": "number" 61 | }, 62 | "typicalP": { 63 | "type": "number" 64 | }, 65 | "repeatLastN": { 66 | "type": "number" 67 | }, 68 | "temperature": { 69 | "type": "number" 70 | }, 71 | "repeatPenalty": { 72 | "type": "number" 73 | }, 74 | "presencePenalty": { 75 | "type": "number" 76 | }, 77 | "frequencyPenalty": { 78 | "type": "number" 79 | }, 80 | "mirostat": { 81 | "type": "number" 82 | }, 83 | "mirostatTau": { 84 | "type": "number" 85 | }, 86 | "mirostatEta": { 87 | "type": "number" 88 | }, 89 | "penalizeNewline": { 90 | "type": "boolean" 91 | }, 92 | "keepAlive": { 93 | "type": [ 94 | "string", 95 | "number" 96 | ], 97 | "default": "5m" 98 | }, 99 | "stop": { 100 | "type": "array", 101 | "items": { 102 | "type": "string" 103 | } 104 | }, 105 | "disableStreaming": { 
106 | "type": "boolean", 107 | "description": "Whether to disable streaming.\n\nIf streaming is bypassed, then `stream()` will defer to `invoke()`.\n\n- If true, will always bypass streaming case.\n- If false (default), will always use streaming case if available." 108 | }, 109 | "model": { 110 | "type": "string", 111 | "description": "The model to invoke. If the model does not exist, it will be pulled.", 112 | "default": "" 113 | }, 114 | "baseUrl": { 115 | "type": "string", 116 | "description": "The host URL of the Ollama server.", 117 | "default": "" 118 | }, 119 | "headers": { 120 | "type": "object", 121 | "additionalProperties": false, 122 | "description": "Optional HTTP Headers to include in the request." 123 | }, 124 | "checkOrPullModel": { 125 | "type": "boolean", 126 | "description": "Whether or not to check the model exists on the local machine before invoking it. If set to `true`, the model will be pulled if it does not exist.", 127 | "default": false 128 | }, 129 | "streaming": { 130 | "type": "boolean" 131 | }, 132 | "format": { 133 | "anyOf": [ 134 | { 135 | "type": "string" 136 | }, 137 | { 138 | "type": "object" 139 | } 140 | ] 141 | } 142 | }, 143 | "additionalProperties": false, 144 | "description": "Input to chat model class.", 145 | "definitions": {} 146 | } 147 | -------------------------------------------------------------------------------- /src/default-providers/OpenAI/completer.ts: -------------------------------------------------------------------------------- 1 | import { 2 | CompletionHandler, 3 | IInlineCompletionContext 4 | } from '@jupyterlab/completer'; 5 | import { AIMessage, SystemMessage } from '@langchain/core/messages'; 6 | import { ChatOpenAI } from '@langchain/openai'; 7 | 8 | import { BaseCompleter, IBaseCompleter } from '../../base-completer'; 9 | import { COMPLETION_SYSTEM_PROMPT } from '../../provider'; 10 | 11 | export class OpenAICompleter implements IBaseCompleter { 12 | constructor(options: BaseCompleter.IOptions) { 13 
| this._completer = new ChatOpenAI({ ...options.settings }); 14 | } 15 | 16 | /** 17 | * Getter and setter for the initial prompt. 18 | */ 19 | get prompt(): string { 20 | return this._prompt; 21 | } 22 | set prompt(value: string) { 23 | this._prompt = value; 24 | } 25 | 26 | async fetch( 27 | request: CompletionHandler.IRequest, 28 | context: IInlineCompletionContext 29 | ) { 30 | const { text, offset: cursorOffset } = request; 31 | const prompt = text.slice(0, cursorOffset); 32 | 33 | const messages = [new SystemMessage(this._prompt), new AIMessage(prompt)]; 34 | 35 | try { 36 | const response = await this._completer.invoke(messages); 37 | const items = []; 38 | if (typeof response.content === 'string') { 39 | items.push({ 40 | insertText: response.content 41 | }); 42 | } else { 43 | response.content.forEach(content => { 44 | if (content.type !== 'text') { 45 | return; 46 | } 47 | items.push({ 48 | insertText: content.text, 49 | filterText: prompt.substring(prompt.length) 50 | }); 51 | }); 52 | } 53 | return { items }; 54 | } catch (error) { 55 | console.error('Error fetching completions', error); 56 | return { items: [] }; 57 | } 58 | } 59 | 60 | private _completer: ChatOpenAI; 61 | private _prompt: string = COMPLETION_SYSTEM_PROMPT; 62 | } 63 | -------------------------------------------------------------------------------- /src/default-providers/OpenAI/settings-schema.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "http://json-schema.org/draft-07/schema#", 3 | "type": "object", 4 | "additionalProperties": false, 5 | "properties": { 6 | "disableStreaming": { 7 | "type": "boolean", 8 | "description": "Whether to disable streaming.\n\nIf streaming is bypassed, then `stream()` will defer to `invoke()`.\n\n- If true, will always bypass streaming case.\n- If false (default), will always use streaming case if available." 
9 | }, 10 | "logprobs": { 11 | "type": "boolean", 12 | "description": "Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of message." 13 | }, 14 | "topLogprobs": { 15 | "type": "number", 16 | "description": "An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with an associated log probability. logprobs must be set to true if this parameter is used." 17 | }, 18 | "prefixMessages": { 19 | "type": "array", 20 | "items": { 21 | "anyOf": [ 22 | { 23 | "type": "object", 24 | "properties": { 25 | "content": { 26 | "anyOf": [ 27 | { 28 | "type": "string" 29 | }, 30 | { 31 | "type": "array", 32 | "items": { 33 | "type": "object", 34 | "properties": { 35 | "text": { 36 | "type": "string", 37 | "description": "The text content." 38 | }, 39 | "type": { 40 | "type": "string", 41 | "const": "text", 42 | "description": "The type of the content part." 43 | } 44 | }, 45 | "required": [ 46 | "text", 47 | "type" 48 | ], 49 | "additionalProperties": false, 50 | "description": "Learn about [text inputs](https://platform.openai.com/docs/guides/text-generation)." 51 | } 52 | } 53 | ], 54 | "description": "The contents of the developer message." 55 | }, 56 | "role": { 57 | "type": "string", 58 | "const": "developer", 59 | "description": "The role of the messages author, in this case `developer`." 60 | }, 61 | "name": { 62 | "type": "string", 63 | "description": "An optional name for the participant. Provides the model information to differentiate between participants of the same role." 64 | } 65 | }, 66 | "required": [ 67 | "content", 68 | "role" 69 | ], 70 | "additionalProperties": false, 71 | "description": "Developer-provided instructions that the model should follow, regardless of messages sent by the user. With o1 models and newer, `developer` messages replace the previous `system` messages." 
72 | }, 73 | { 74 | "type": "object", 75 | "properties": { 76 | "content": { 77 | "anyOf": [ 78 | { 79 | "type": "string" 80 | }, 81 | { 82 | "type": "array", 83 | "items": { 84 | "type": "object", 85 | "properties": { 86 | "text": { 87 | "type": "string", 88 | "description": "The text content." 89 | }, 90 | "type": { 91 | "type": "string", 92 | "const": "text", 93 | "description": "The type of the content part." 94 | } 95 | }, 96 | "required": [ 97 | "text", 98 | "type" 99 | ], 100 | "additionalProperties": false, 101 | "description": "Learn about [text inputs](https://platform.openai.com/docs/guides/text-generation)." 102 | } 103 | } 104 | ], 105 | "description": "The contents of the system message." 106 | }, 107 | "role": { 108 | "type": "string", 109 | "const": "system", 110 | "description": "The role of the messages author, in this case `system`." 111 | }, 112 | "name": { 113 | "type": "string", 114 | "description": "An optional name for the participant. Provides the model information to differentiate between participants of the same role." 115 | } 116 | }, 117 | "required": [ 118 | "content", 119 | "role" 120 | ], 121 | "additionalProperties": false, 122 | "description": "Developer-provided instructions that the model should follow, regardless of messages sent by the user. With o1 models and newer, use `developer` messages for this purpose instead." 123 | }, 124 | { 125 | "type": "object", 126 | "properties": { 127 | "content": { 128 | "anyOf": [ 129 | { 130 | "type": "string" 131 | }, 132 | { 133 | "type": "array", 134 | "items": { 135 | "anyOf": [ 136 | { 137 | "type": "object", 138 | "properties": { 139 | "text": { 140 | "type": "string", 141 | "description": "The text content." 142 | }, 143 | "type": { 144 | "type": "string", 145 | "const": "text", 146 | "description": "The type of the content part." 
147 | } 148 | }, 149 | "required": [ 150 | "text", 151 | "type" 152 | ], 153 | "additionalProperties": false, 154 | "description": "Learn about [text inputs](https://platform.openai.com/docs/guides/text-generation)." 155 | }, 156 | { 157 | "type": "object", 158 | "properties": { 159 | "image_url": { 160 | "type": "object", 161 | "properties": { 162 | "url": { 163 | "type": "string", 164 | "description": "Either a URL of the image or the base64 encoded image data." 165 | }, 166 | "detail": { 167 | "type": "string", 168 | "enum": [ 169 | "auto", 170 | "low", 171 | "high" 172 | ], 173 | "description": "Specifies the detail level of the image. Learn more in the [Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding)." 174 | } 175 | }, 176 | "required": [ 177 | "url" 178 | ], 179 | "additionalProperties": false 180 | }, 181 | "type": { 182 | "type": "string", 183 | "const": "image_url", 184 | "description": "The type of the content part." 185 | } 186 | }, 187 | "required": [ 188 | "image_url", 189 | "type" 190 | ], 191 | "additionalProperties": false, 192 | "description": "Learn about [image inputs](https://platform.openai.com/docs/guides/vision)." 193 | }, 194 | { 195 | "type": "object", 196 | "properties": { 197 | "input_audio": { 198 | "type": "object", 199 | "properties": { 200 | "data": { 201 | "type": "string", 202 | "description": "Base64 encoded audio data." 203 | }, 204 | "format": { 205 | "type": "string", 206 | "enum": [ 207 | "wav", 208 | "mp3" 209 | ], 210 | "description": "The format of the encoded audio data. Currently supports \"wav\" and \"mp3\"." 211 | } 212 | }, 213 | "required": [ 214 | "data", 215 | "format" 216 | ], 217 | "additionalProperties": false 218 | }, 219 | "type": { 220 | "type": "string", 221 | "const": "input_audio", 222 | "description": "The type of the content part. Always `input_audio`." 
223 | } 224 | }, 225 | "required": [ 226 | "input_audio", 227 | "type" 228 | ], 229 | "additionalProperties": false, 230 | "description": "Learn about [audio inputs](https://platform.openai.com/docs/guides/audio)." 231 | } 232 | ], 233 | "description": "Learn about [text inputs](https://platform.openai.com/docs/guides/text-generation)." 234 | } 235 | } 236 | ], 237 | "description": "The contents of the user message." 238 | }, 239 | "role": { 240 | "type": "string", 241 | "const": "user", 242 | "description": "The role of the messages author, in this case `user`." 243 | }, 244 | "name": { 245 | "type": "string", 246 | "description": "An optional name for the participant. Provides the model information to differentiate between participants of the same role." 247 | } 248 | }, 249 | "required": [ 250 | "content", 251 | "role" 252 | ], 253 | "additionalProperties": false, 254 | "description": "Messages sent by an end user, containing prompts or additional context information." 255 | }, 256 | { 257 | "type": "object", 258 | "properties": { 259 | "role": { 260 | "type": "string", 261 | "const": "assistant", 262 | "description": "The role of the messages author, in this case `assistant`." 263 | }, 264 | "audio": { 265 | "anyOf": [ 266 | { 267 | "type": "object", 268 | "properties": { 269 | "id": { 270 | "type": "string", 271 | "description": "Unique identifier for a previous audio response from the model." 272 | } 273 | }, 274 | "required": [ 275 | "id" 276 | ], 277 | "additionalProperties": false, 278 | "description": "Data about a previous audio response from the model. [Learn more](https://platform.openai.com/docs/guides/audio)." 279 | }, 280 | { 281 | "type": "null" 282 | } 283 | ], 284 | "description": "Data about a previous audio response from the model. [Learn more](https://platform.openai.com/docs/guides/audio)." 
285 | }, 286 | "content": { 287 | "anyOf": [ 288 | { 289 | "type": "string" 290 | }, 291 | { 292 | "type": "array", 293 | "items": { 294 | "anyOf": [ 295 | { 296 | "type": "object", 297 | "properties": { 298 | "text": { 299 | "type": "string", 300 | "description": "The text content." 301 | }, 302 | "type": { 303 | "type": "string", 304 | "const": "text", 305 | "description": "The type of the content part." 306 | } 307 | }, 308 | "required": [ 309 | "text", 310 | "type" 311 | ], 312 | "additionalProperties": false, 313 | "description": "Learn about [text inputs](https://platform.openai.com/docs/guides/text-generation)." 314 | }, 315 | { 316 | "type": "object", 317 | "properties": { 318 | "refusal": { 319 | "type": "string", 320 | "description": "The refusal message generated by the model." 321 | }, 322 | "type": { 323 | "type": "string", 324 | "const": "refusal", 325 | "description": "The type of the content part." 326 | } 327 | }, 328 | "required": [ 329 | "refusal", 330 | "type" 331 | ], 332 | "additionalProperties": false 333 | } 334 | ] 335 | } 336 | }, 337 | { 338 | "type": "null" 339 | } 340 | ], 341 | "description": "The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified." 342 | }, 343 | "function_call": { 344 | "anyOf": [ 345 | { 346 | "type": "object", 347 | "properties": { 348 | "arguments": { 349 | "type": "string", 350 | "description": "The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function." 351 | }, 352 | "name": { 353 | "type": "string", 354 | "description": "The name of the function to call." 355 | } 356 | }, 357 | "required": [ 358 | "arguments", 359 | "name" 360 | ], 361 | "additionalProperties": false, 362 | "deprecated": "Deprecated and replaced by `tool_calls`. 
The name and arguments of a\nfunction that should be called, as generated by the model." 363 | }, 364 | { 365 | "type": "null" 366 | } 367 | ], 368 | "deprecated": "Deprecated and replaced by `tool_calls`. The name and arguments of a\nfunction that should be called, as generated by the model." 369 | }, 370 | "name": { 371 | "type": "string", 372 | "description": "An optional name for the participant. Provides the model information to differentiate between participants of the same role." 373 | }, 374 | "refusal": { 375 | "type": [ 376 | "string", 377 | "null" 378 | ], 379 | "description": "The refusal message by the assistant." 380 | }, 381 | "tool_calls": { 382 | "type": "array", 383 | "items": { 384 | "type": "object", 385 | "properties": { 386 | "id": { 387 | "type": "string", 388 | "description": "The ID of the tool call." 389 | }, 390 | "function": { 391 | "type": "object", 392 | "properties": { 393 | "arguments": { 394 | "type": "string", 395 | "description": "The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function." 396 | }, 397 | "name": { 398 | "type": "string", 399 | "description": "The name of the function to call." 400 | } 401 | }, 402 | "required": [ 403 | "arguments", 404 | "name" 405 | ], 406 | "additionalProperties": false, 407 | "description": "The function that the model called." 408 | }, 409 | "type": { 410 | "type": "string", 411 | "const": "function", 412 | "description": "The type of the tool. Currently, only `function` is supported." 413 | } 414 | }, 415 | "required": [ 416 | "id", 417 | "function", 418 | "type" 419 | ], 420 | "additionalProperties": false 421 | }, 422 | "description": "The tool calls generated by the model, such as function calls." 
423 | } 424 | }, 425 | "required": [ 426 | "role" 427 | ], 428 | "additionalProperties": false, 429 | "description": "Messages sent by the model in response to user messages." 430 | }, 431 | { 432 | "type": "object", 433 | "properties": { 434 | "content": { 435 | "anyOf": [ 436 | { 437 | "type": "string" 438 | }, 439 | { 440 | "type": "array", 441 | "items": { 442 | "type": "object", 443 | "properties": { 444 | "text": { 445 | "type": "string", 446 | "description": "The text content." 447 | }, 448 | "type": { 449 | "type": "string", 450 | "const": "text", 451 | "description": "The type of the content part." 452 | } 453 | }, 454 | "required": [ 455 | "text", 456 | "type" 457 | ], 458 | "additionalProperties": false, 459 | "description": "Learn about [text inputs](https://platform.openai.com/docs/guides/text-generation)." 460 | } 461 | } 462 | ], 463 | "description": "The contents of the tool message." 464 | }, 465 | "role": { 466 | "type": "string", 467 | "const": "tool", 468 | "description": "The role of the messages author, in this case `tool`." 469 | }, 470 | "tool_call_id": { 471 | "type": "string", 472 | "description": "Tool call that this message is responding to." 473 | } 474 | }, 475 | "required": [ 476 | "content", 477 | "role", 478 | "tool_call_id" 479 | ], 480 | "additionalProperties": false 481 | }, 482 | { 483 | "type": "object", 484 | "properties": { 485 | "content": { 486 | "type": [ 487 | "string", 488 | "null" 489 | ], 490 | "description": "The contents of the function message." 491 | }, 492 | "name": { 493 | "type": "string", 494 | "description": "The name of the function to call." 495 | }, 496 | "role": { 497 | "type": "string", 498 | "const": "function", 499 | "description": "The role of the messages author, in this case `function`." 
500 | } 501 | }, 502 | "required": [ 503 | "content", 504 | "name", 505 | "role" 506 | ], 507 | "additionalProperties": false, 508 | "deprecated": true 509 | } 510 | ], 511 | "description": "Developer-provided instructions that the model should follow, regardless of messages sent by the user. With o1 models and newer, `developer` messages replace the previous `system` messages." 512 | }, 513 | "description": "ChatGPT messages to pass as a prefix to the prompt" 514 | }, 515 | "__includeRawResponse": { 516 | "type": "boolean", 517 | "description": "Whether to include the raw OpenAI response in the output message's \"additional_kwargs\" field. Currently in experimental beta." 518 | }, 519 | "supportsStrictToolCalling": { 520 | "type": "boolean", 521 | "description": "Whether the model supports the `strict` argument when passing in tools. If `undefined` the `strict` argument will not be passed to OpenAI." 522 | }, 523 | "modalities": { 524 | "type": "array", 525 | "items": { 526 | "type": "string", 527 | "enum": [ 528 | "text", 529 | "audio" 530 | ] 531 | }, 532 | "description": "Output types that you would like the model to generate for this request. Most models are capable of generating text, which is the default:\n\n`[\"text\"]`\n\nThe `gpt-4o-audio-preview` model can also be used to [generate audio](https://platform.openai.com/docs/guides/audio). To request that this model generate both text and audio responses, you can use:\n\n`[\"text\", \"audio\"]`" 533 | }, 534 | "audio": { 535 | "type": "object", 536 | "properties": { 537 | "format": { 538 | "type": "string", 539 | "enum": [ 540 | "wav", 541 | "mp3", 542 | "flac", 543 | "opus", 544 | "pcm16" 545 | ], 546 | "description": "Specifies the output audio format. Must be one of `wav`, `mp3`, `flac`, `opus`, or `pcm16`." 
547 | }, 548 | "voice": { 549 | "type": "string", 550 | "enum": [ 551 | "alloy", 552 | "ash", 553 | "ballad", 554 | "coral", 555 | "echo", 556 | "sage", 557 | "shimmer", 558 | "verse" 559 | ], 560 | "description": "The voice the model uses to respond. Supported voices are `ash`, `ballad`, `coral`, `sage`, and `verse` (also supported but not recommended are `alloy`, `echo`, and `shimmer`; these voices are less expressive)." 561 | } 562 | }, 563 | "required": [ 564 | "format", 565 | "voice" 566 | ], 567 | "additionalProperties": false, 568 | "description": "Parameters for audio output. Required when audio output is requested with `modalities: [\"audio\"]`. [Learn more](https://platform.openai.com/docs/guides/audio)." 569 | }, 570 | "reasoningEffort": { 571 | "type": "string", 572 | "enum": [ 573 | "low", 574 | "medium", 575 | "high" 576 | ], 577 | "description": "Constrains effort on reasoning for reasoning models. Currently supported values are low, medium, and high. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response." 578 | }, 579 | "temperature": { 580 | "type": "number", 581 | "description": "Sampling temperature to use" 582 | }, 583 | "maxTokens": { 584 | "type": "number", 585 | "description": "Maximum number of tokens to generate in the completion. -1 returns as many tokens as possible given the prompt and the model's maximum context size." 586 | }, 587 | "maxCompletionTokens": { 588 | "type": "number", 589 | "description": "Maximum number of tokens to generate in the completion. -1 returns as many tokens as possible given the prompt and the model's maximum context size. Alias for `maxTokens` for reasoning models." 
590 | }, 591 | "topP": { 592 | "type": "number", 593 | "description": "Total probability mass of tokens to consider at each step" 594 | }, 595 | "frequencyPenalty": { 596 | "type": "number", 597 | "description": "Penalizes repeated tokens according to frequency" 598 | }, 599 | "presencePenalty": { 600 | "type": "number", 601 | "description": "Penalizes repeated tokens" 602 | }, 603 | "n": { 604 | "type": "number", 605 | "description": "Number of completions to generate for each prompt" 606 | }, 607 | "logitBias": { 608 | "type": "object", 609 | "additionalProperties": { 610 | "type": "number" 611 | }, 612 | "description": "Dictionary used to adjust the probability of specific tokens being generated" 613 | }, 614 | "user": { 615 | "type": "string", 616 | "description": "Unique string identifier representing your end-user, which can help OpenAI to monitor and detect abuse." 617 | }, 618 | "streaming": { 619 | "type": "boolean", 620 | "description": "Whether to stream the results or not. Enabling disables tokenUsage reporting" 621 | }, 622 | "streamUsage": { 623 | "type": "boolean", 624 | "description": "Whether or not to include token usage data in streamed chunks.", 625 | "default": false 626 | }, 627 | "modelName": { 628 | "type": "string", 629 | "description": "Model name to use Alias for `model`", 630 | "deprecated": "Use \"model\" instead." 631 | }, 632 | "model": { 633 | "type": "string", 634 | "description": "Model name to use" 635 | }, 636 | "modelKwargs": { 637 | "type": "object", 638 | "description": "Holds any additional parameters that are valid to pass to {@link * https://platform.openai.com/docs/api-reference/completions/create | } * `openai.createCompletion`} that are not explicitly specified on this class." 
639 | }, 640 | "stop": { 641 | "type": "array", 642 | "items": { 643 | "type": "string" 644 | }, 645 | "description": "List of stop words to use when generating Alias for `stopSequences`" 646 | }, 647 | "stopSequences": { 648 | "type": "array", 649 | "items": { 650 | "type": "string" 651 | }, 652 | "description": "List of stop words to use when generating" 653 | }, 654 | "timeout": { 655 | "type": "number", 656 | "description": "Timeout to use when making requests to OpenAI." 657 | }, 658 | "openAIApiKey": { 659 | "type": "string", 660 | "description": "API key to use when making requests to OpenAI. Defaults to the value of `OPENAI_API_KEY` environment variable. Alias for `apiKey`" 661 | }, 662 | "apiKey": { 663 | "type": "string", 664 | "description": "API key to use when making requests to OpenAI. Defaults to the value of `OPENAI_API_KEY` environment variable." 665 | } 666 | }, 667 | "definitions": {} 668 | } 669 | -------------------------------------------------------------------------------- /src/default-providers/WebLLM/completer.ts: -------------------------------------------------------------------------------- 1 | import { 2 | CompletionHandler, 3 | IInlineCompletionContext 4 | } from '@jupyterlab/completer'; 5 | import { HumanMessage, SystemMessage } from '@langchain/core/messages'; 6 | import { ChatWebLLM } from '@langchain/community/chat_models/webllm'; 7 | 8 | import { BaseCompleter, IBaseCompleter } from '../../base-completer'; 9 | import { COMPLETION_SYSTEM_PROMPT } from '../../provider'; 10 | 11 | /** 12 | * Regular expression to match the '```' string at the start of a string. 13 | * So the completions returned by the LLM can still be kept after removing the code block formatting. 
14 | * 15 | * For example, if the response contains the following content after typing `import pandas`: 16 | * 17 | * ```python 18 | * as pd 19 | * ``` 20 | * 21 | * The formatting string after removing the code block delimiters will be: 22 | * 23 | * as pd 24 | */ 25 | const CODE_BLOCK_START_REGEX = /^```(?:[a-zA-Z]+)?\n?/; 26 | 27 | /** 28 | * Regular expression to match the '```' string at the end of a string. 29 | */ 30 | const CODE_BLOCK_END_REGEX = /```$/; 31 | 32 | export class WebLLMCompleter implements IBaseCompleter { 33 | constructor(options: BaseCompleter.IOptions) { 34 | const model = options.settings.model as string; 35 | // provide model separately since ChatWebLLM expects it 36 | this._completer = new ChatWebLLM({ 37 | ...options.settings, 38 | model 39 | }); 40 | 41 | // Initialize the model and track its status 42 | this._isInitialized = false; 43 | this._isInitializing = false; 44 | this._initError = null; 45 | void this._initializeModel(); 46 | } 47 | 48 | /** 49 | * Initialize the WebLLM model 50 | */ 51 | private async _initializeModel(): Promise { 52 | if (this._isInitialized || this._isInitializing) { 53 | return; 54 | } 55 | 56 | this._isInitializing = true; 57 | try { 58 | await this._completer.initialize((progress: any) => { 59 | console.log('WebLLM initialization progress:', progress); 60 | }); 61 | this._isInitialized = true; 62 | this._isInitializing = false; 63 | console.log('WebLLM model successfully initialized'); 64 | } catch (error) { 65 | this._initError = 66 | error instanceof Error ? error : new Error(String(error)); 67 | this._isInitializing = false; 68 | console.error('Failed to initialize WebLLM model:', error); 69 | } 70 | } 71 | 72 | /** 73 | * Getter and setter for the initial prompt. 
74 | */ 75 | get prompt(): string { 76 | return this._prompt; 77 | } 78 | set prompt(value: string) { 79 | this._prompt = value; 80 | } 81 | 82 | get provider(): ChatWebLLM { 83 | return this._completer; 84 | } 85 | 86 | async fetch( 87 | request: CompletionHandler.IRequest, 88 | context: IInlineCompletionContext 89 | ) { 90 | // Abort any pending request 91 | if (this._abortController) { 92 | this._abortController.abort(); 93 | } 94 | 95 | // Create a new abort controller for this request 96 | this._abortController = new AbortController(); 97 | const signal = this._abortController.signal; 98 | 99 | if (!this._isInitialized) { 100 | if (this._initError) { 101 | console.error('WebLLM model failed to initialize:', this._initError); 102 | return { items: [] }; 103 | } 104 | 105 | if (!this._isInitializing) { 106 | // Try to initialize again if it's not currently initializing 107 | await this._initializeModel(); 108 | } else { 109 | console.log( 110 | 'WebLLM model is still initializing, please try again later' 111 | ); 112 | return { items: [] }; 113 | } 114 | 115 | // Return empty if still not initialized 116 | if (!this._isInitialized) { 117 | return { items: [] }; 118 | } 119 | } 120 | 121 | const { text, offset: cursorOffset } = request; 122 | const prompt = text.slice(0, cursorOffset); 123 | const trimmedPrompt = prompt.trim(); 124 | 125 | const messages = [ 126 | new SystemMessage(this._prompt), 127 | new HumanMessage(trimmedPrompt) 128 | ]; 129 | 130 | try { 131 | console.log('Trigger invoke'); 132 | const response = await this._completer.invoke(messages, { signal }); 133 | let content = response.content as string; 134 | console.log('Response content:', content); 135 | 136 | if (CODE_BLOCK_START_REGEX.test(content)) { 137 | content = content 138 | .replace(CODE_BLOCK_START_REGEX, '') 139 | .replace(CODE_BLOCK_END_REGEX, ''); 140 | } 141 | 142 | const items = [{ insertText: content }]; 143 | return { 144 | items 145 | }; 146 | } catch (error) { 147 | if (error 
10 | 3. Once loaded, you can start using the chat.
23 | export async function compatibilityCheck(): Promise<string | null> { 24 | // Check if the browser supports WebLLM (requires WebGPU)
8 | }, 9 | "temperature": { 10 | "type": "number" 11 | }, 12 | "model": { 13 | "type": "string" 14 | } 15 | }, 16 | "required": [ 17 | "model" 18 | ], 19 | "additionalProperties": false, 20 | "definitions": {} 21 | } 22 | -------------------------------------------------------------------------------- /src/default-providers/index.ts: -------------------------------------------------------------------------------- 1 | import { 2 | JupyterFrontEnd, 3 | JupyterFrontEndPlugin 4 | } from '@jupyterlab/application'; 5 | import { Notification } from '@jupyterlab/apputils'; 6 | 7 | import { ChatAnthropic } from '@langchain/anthropic'; 8 | import { ChatWebLLM } from '@langchain/community/chat_models/webllm'; 9 | import { ChromeAI } from '@langchain/community/experimental/llms/chrome_ai'; 10 | import { ChatMistralAI } from '@langchain/mistralai'; 11 | import { ChatOllama } from '@langchain/ollama'; 12 | import { ChatOpenAI } from '@langchain/openai'; 13 | 14 | // Import completers 15 | import { AnthropicCompleter } from './Anthropic/completer'; 16 | import { ChromeCompleter } from './ChromeAI/completer'; 17 | import { CodestralCompleter } from './MistralAI/completer'; 18 | import { OllamaCompleter } from './Ollama/completer'; 19 | import { OpenAICompleter } from './OpenAI/completer'; 20 | import { WebLLMCompleter } from './WebLLM/completer'; 21 | 22 | // Import Settings 23 | import AnthropicSettings from './Anthropic/settings-schema.json'; 24 | import ChromeAISettings from './ChromeAI/settings-schema.json'; 25 | import MistralAISettings from './MistralAI/settings-schema.json'; 26 | import OllamaAISettings from './Ollama/settings-schema.json'; 27 | import OpenAISettings from './OpenAI/settings-schema.json'; 28 | import WebLLMSettings from './WebLLM/settings-schema.json'; 29 | 30 | // Import instructions 31 | import ChromeAIInstructions, { 32 | compatibilityCheck as chromeAICompatibilityCheck 33 | } from './ChromeAI/instructions'; 34 | import MistralAIInstructions from 
'./MistralAI/instructions'; 35 | import OllamaInstructions from './Ollama/instructions'; 36 | import WebLLMInstructions, { 37 | compatibilityCheck as webLLMCompatibilityCheck 38 | } from './WebLLM/instructions'; 39 | 40 | import { prebuiltAppConfig } from '@mlc-ai/web-llm'; 41 | 42 | import { IAIProvider, IAIProviderRegistry } from '../tokens'; 43 | 44 | // Build the AIProvider list 45 | const AIProviders: IAIProvider[] = [ 46 | { 47 | name: 'Anthropic', 48 | chatModel: ChatAnthropic, 49 | completer: AnthropicCompleter, 50 | settingsSchema: AnthropicSettings, 51 | errorMessage: (error: any) => error.error.error.message 52 | }, 53 | { 54 | name: 'ChromeAI', 55 | // TODO: fix 56 | // @ts-expect-error: missing properties 57 | chatModel: ChromeAI, 58 | completer: ChromeCompleter, 59 | instructions: ChromeAIInstructions, 60 | settingsSchema: ChromeAISettings, 61 | compatibilityCheck: chromeAICompatibilityCheck 62 | }, 63 | { 64 | name: 'MistralAI', 65 | chatModel: ChatMistralAI, 66 | completer: CodestralCompleter, 67 | instructions: MistralAIInstructions, 68 | settingsSchema: MistralAISettings 69 | }, 70 | { 71 | name: 'Ollama', 72 | chatModel: ChatOllama, 73 | completer: OllamaCompleter, 74 | instructions: OllamaInstructions, 75 | settingsSchema: OllamaAISettings 76 | }, 77 | { 78 | name: 'OpenAI', 79 | chatModel: ChatOpenAI, 80 | completer: OpenAICompleter, 81 | settingsSchema: OpenAISettings 82 | } 83 | ]; 84 | 85 | /** 86 | * Register the WebLLM provider in a separate plugin since it creates notifications 87 | * when the model is changed in the settings. 
89 | const webLLMProviderPlugin: JupyterFrontEndPlugin<void> = {
116 | if (currentName === 'WebLLM') { 117 | const compatibilityError = await webLLMCompatibilityCheck(); 118 | 119 | if (compatibilityError) { 120 | Notification.dismiss(); 121 | Notification.emit(compatibilityError, 'error', { 122 | autoClose: 2000 123 | }); 124 | return; 125 | } 126 | 127 | const model = currentChatModel as ChatWebLLM; 128 | if (model === null || !model.model) { 129 | return; 130 | } 131 | 132 | // Find if the model is part of the prebuiltAppConfig 133 | const modelRecord = prebuiltAppConfig.model_list.find( 134 | modelRecord => modelRecord.model_id === model.model 135 | ); 136 | if (!modelRecord) { 137 | Notification.dismiss(); 138 | Notification.emit( 139 | `Model ${model.model} not found in the prebuiltAppConfig`, 140 | 'error', 141 | { 142 | autoClose: 2000 143 | } 144 | ); 145 | return; 146 | } 147 | 148 | // create a notification 149 | const notification = Notification.emit( 150 | 'Loading model...', 151 | 'in-progress', 152 | { 153 | autoClose: false, 154 | progress: 0 155 | } 156 | ); 157 | try { 158 | await model.initialize(report => { 159 | const { progress, text } = report; 160 | if (progress === 1) { 161 | Notification.update({ 162 | id: notification, 163 | progress: 1, 164 | message: `Model ${model.model} loaded successfully`, 165 | type: 'success', 166 | autoClose: 2000 167 | }); 168 | return; 169 | } 170 | Notification.update({ 171 | id: notification, 172 | progress: progress, 173 | message: text, 174 | type: 'in-progress' 175 | }); 176 | }); 177 | } catch (err) { 178 | Notification.update({ 179 | id: notification, 180 | progress: 1, 181 | message: `Error loading model ${model.model}`, 182 | type: 'error', 183 | autoClose: 2000 184 | }); 185 | } 186 | } 187 | }); 188 | } 189 | }; 190 | 191 | /** 192 | * Register all default AI providers.
193 | */ 194 | const aiProviderPlugins = AIProviders.map(provider => { 195 | return { 196 | id: `@jupyterlite/ai:${provider.name}`, 197 | autoStart: true, 198 | requires: [IAIProviderRegistry], 199 | activate: (app: JupyterFrontEnd, registry: IAIProviderRegistry) => { 200 | registry.add(provider); 201 | } 202 | }; 203 | }); 204 | 205 | export const defaultProviderPlugins: JupyterFrontEndPlugin[] = [ 206 | webLLMProviderPlugin, 207 | ...aiProviderPlugins 208 | ]; 209 | -------------------------------------------------------------------------------- /src/global.d.ts: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) Jupyter Development Team. 3 | * Distributed under the terms of the Modified BSD License. 4 | */ 5 | 6 | declare module '*.svg' { 7 | const value: string; 8 | export default value; 9 | } 10 | -------------------------------------------------------------------------------- /src/icons.ts: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) Jupyter Development Team. 3 | * Distributed under the terms of the Modified BSD License. 4 | */ 5 | 6 | import { LabIcon } from '@jupyterlab/ui-components'; 7 | 8 | /** 9 | * This icon is based on the jupyternaut icon from Jupyter AI: 10 | * https://github.com/jupyterlab/jupyter-ai/blob/main/packages/jupyter-ai/style/icons/jupyternaut.svg 11 | * With a small tweak for the colors to match the JupyterLite icon. 
12 | */ 13 | import jupyternautLiteSvg from '../style/icons/jupyternaut-lite.svg'; 14 | 15 | export const jupyternautLiteIcon = new LabIcon({ 16 | name: '@jupyterlite/ai:jupyternaut-lite', 17 | svgstr: jupyternautLiteSvg 18 | }); 19 | -------------------------------------------------------------------------------- /src/index.ts: -------------------------------------------------------------------------------- 1 | import { 2 | ActiveCellManager, 3 | buildChatSidebar, 4 | buildErrorWidget, 5 | ChatCommandRegistry, 6 | IActiveCellManager, 7 | IChatCommandRegistry, 8 | InputToolbarRegistry 9 | } from '@jupyter/chat'; 10 | import { 11 | JupyterFrontEnd, 12 | JupyterFrontEndPlugin 13 | } from '@jupyterlab/application'; 14 | import { ReactWidget, IThemeManager } from '@jupyterlab/apputils'; 15 | import { ICompletionProviderManager } from '@jupyterlab/completer'; 16 | import { INotebookTracker } from '@jupyterlab/notebook'; 17 | import { IRenderMimeRegistry } from '@jupyterlab/rendermime'; 18 | import { ISettingRegistry } from '@jupyterlab/settingregistry'; 19 | import { IFormRendererRegistry } from '@jupyterlab/ui-components'; 20 | import { ReadonlyPartialJSONObject } from '@lumino/coreutils'; 21 | import { ISecretsManager, SecretsManager } from 'jupyter-secrets-manager'; 22 | 23 | import { ChatHandler, welcomeMessage } from './chat-handler'; 24 | import { CompletionProvider } from './completion-provider'; 25 | import { defaultProviderPlugins } from './default-providers'; 26 | import { AIProviderRegistry } from './provider'; 27 | import { aiSettingsRenderer } from './settings'; 28 | import { IAIProviderRegistry, PLUGIN_IDS } from './tokens'; 29 | import { stopItem } from './components/stop-button'; 30 | 31 | const chatCommandRegistryPlugin: JupyterFrontEndPlugin = { 32 | id: PLUGIN_IDS.chatCommandRegistry, 33 | description: 'Autocompletion registry', 34 | autoStart: true, 35 | provides: IChatCommandRegistry, 36 | activate: () => { 37 | const registry = new 
ChatCommandRegistry(); 38 | registry.addProvider(new ChatHandler.ClearCommandProvider()); 39 | return registry; 40 | } 41 | }; 42 | 43 | const chatPlugin: JupyterFrontEndPlugin = { 44 | id: PLUGIN_IDS.chat, 45 | description: 'LLM chat extension', 46 | autoStart: true, 47 | requires: [IAIProviderRegistry, IRenderMimeRegistry, IChatCommandRegistry], 48 | optional: [INotebookTracker, ISettingRegistry, IThemeManager], 49 | activate: async ( 50 | app: JupyterFrontEnd, 51 | providerRegistry: IAIProviderRegistry, 52 | rmRegistry: IRenderMimeRegistry, 53 | chatCommandRegistry: IChatCommandRegistry, 54 | notebookTracker: INotebookTracker | null, 55 | settingsRegistry: ISettingRegistry | null, 56 | themeManager: IThemeManager | null 57 | ) => { 58 | let activeCellManager: IActiveCellManager | null = null; 59 | if (notebookTracker) { 60 | activeCellManager = new ActiveCellManager({ 61 | tracker: notebookTracker, 62 | shell: app.shell 63 | }); 64 | } 65 | 66 | const chatHandler = new ChatHandler({ 67 | providerRegistry, 68 | activeCellManager 69 | }); 70 | 71 | let sendWithShiftEnter = false; 72 | let enableCodeToolbar = true; 73 | let personaName = 'AI'; 74 | 75 | function loadSetting(setting: ISettingRegistry.ISettings): void { 76 | sendWithShiftEnter = setting.get('sendWithShiftEnter') 77 | .composite as boolean; 78 | enableCodeToolbar = setting.get('enableCodeToolbar').composite as boolean; 79 | personaName = setting.get('personaName').composite as string; 80 | 81 | // set the properties 82 | chatHandler.config = { sendWithShiftEnter, enableCodeToolbar }; 83 | chatHandler.personaName = personaName; 84 | } 85 | 86 | Promise.all([app.restored, settingsRegistry?.load(chatPlugin.id)]) 87 | .then(([, settings]) => { 88 | if (!settings) { 89 | console.warn( 90 | 'The SettingsRegistry is not loaded for the chat extension' 91 | ); 92 | return; 93 | } 94 | loadSetting(settings); 95 | settings.changed.connect(loadSetting); 96 | }) 97 | .catch(reason => { 98 | console.error( 99 | 
`Something went wrong when reading the settings.\n${reason}` 100 | ); 101 | }); 102 | 103 | let chatWidget: ReactWidget | null = null; 104 | 105 | const inputToolbarRegistry = InputToolbarRegistry.defaultToolbarRegistry(); 106 | const stopButton = stopItem(() => chatHandler.stopStreaming()); 107 | inputToolbarRegistry.addItem('stop', stopButton); 108 | 109 | chatHandler.writersChanged.connect((_, writers) => { 110 | if ( 111 | writers.filter( 112 | writer => writer.user.username === chatHandler.personaName 113 | ).length 114 | ) { 115 | inputToolbarRegistry.hide('send'); 116 | inputToolbarRegistry.show('stop'); 117 | } else { 118 | inputToolbarRegistry.hide('stop'); 119 | inputToolbarRegistry.show('send'); 120 | } 121 | }); 122 | 123 | try { 124 | chatWidget = buildChatSidebar({ 125 | model: chatHandler, 126 | themeManager, 127 | rmRegistry, 128 | chatCommandRegistry, 129 | inputToolbarRegistry, 130 | welcomeMessage: welcomeMessage(providerRegistry.providers) 131 | }); 132 | chatWidget.title.caption = 'Jupyterlite AI Chat'; 133 | } catch (e) { 134 | chatWidget = buildErrorWidget(themeManager); 135 | } 136 | 137 | app.shell.add(chatWidget as ReactWidget, 'left', { rank: 2000 }); 138 | 139 | console.log('Chat extension initialized'); 140 | } 141 | }; 142 | 143 | const completerPlugin: JupyterFrontEndPlugin = { 144 | id: PLUGIN_IDS.completer, 145 | autoStart: true, 146 | requires: [IAIProviderRegistry, ICompletionProviderManager], 147 | activate: ( 148 | app: JupyterFrontEnd, 149 | providerRegistry: IAIProviderRegistry, 150 | manager: ICompletionProviderManager 151 | ): void => { 152 | const completer = new CompletionProvider({ 153 | providerRegistry, 154 | requestCompletion: () => app.commands.execute('inline-completer:invoke') 155 | }); 156 | manager.registerInlineProvider(completer); 157 | } 158 | }; 159 | 160 | const providerRegistryPlugin: JupyterFrontEndPlugin = 161 | SecretsManager.sign(PLUGIN_IDS.providerRegistry, token => ({ 162 | id: 
PLUGIN_IDS.providerRegistry, 163 | autoStart: true, 164 | requires: [IFormRendererRegistry, ISettingRegistry], 165 | optional: [IRenderMimeRegistry, ISecretsManager], 166 | provides: IAIProviderRegistry, 167 | activate: ( 168 | app: JupyterFrontEnd, 169 | editorRegistry: IFormRendererRegistry, 170 | settingRegistry: ISettingRegistry, 171 | rmRegistry?: IRenderMimeRegistry, 172 | secretsManager?: ISecretsManager 173 | ): IAIProviderRegistry => { 174 | const providerRegistry = new AIProviderRegistry({ 175 | token, 176 | secretsManager 177 | }); 178 | 179 | editorRegistry.addRenderer( 180 | `${PLUGIN_IDS.providerRegistry}.AIprovider`, 181 | aiSettingsRenderer({ 182 | providerRegistry, 183 | secretsToken: token, 184 | rmRegistry, 185 | secretsManager 186 | }) 187 | ); 188 | 189 | settingRegistry 190 | .load(providerRegistryPlugin.id) 191 | .then(settings => { 192 | if (!secretsManager) { 193 | delete settings.schema.properties?.['UseSecretsManager']; 194 | } 195 | const updateProvider = () => { 196 | // Update the settings to the AI providers. 197 | const providerSettings = (settings.get('AIprovider').composite ?? 
{ 198 | provider: 'None' 199 | }) as ReadonlyPartialJSONObject; 200 | providerRegistry.setProvider({ 201 | name: providerSettings.provider as string, 202 | settings: providerSettings 203 | }); 204 | }; 205 | 206 | settings.changed.connect(() => updateProvider()); 207 | updateProvider(); 208 | }) 209 | .catch(reason => { 210 | console.error( 211 | `Failed to load settings for ${providerRegistryPlugin.id}`, 212 | reason 213 | ); 214 | }); 215 | 216 | return providerRegistry; 217 | } 218 | })); 219 | 220 | export default [ 221 | providerRegistryPlugin, 222 | chatCommandRegistryPlugin, 223 | chatPlugin, 224 | completerPlugin, 225 | ...defaultProviderPlugins 226 | ]; 227 | 228 | export { IAIProviderRegistry } from './tokens'; 229 | -------------------------------------------------------------------------------- /src/provider.ts: -------------------------------------------------------------------------------- 1 | import { 2 | CompletionHandler, 3 | IInlineCompletionContext 4 | } from '@jupyterlab/completer'; 5 | import { BaseLanguageModel } from '@langchain/core/language_models/base'; 6 | import { BaseChatModel } from '@langchain/core/language_models/chat_models'; 7 | import { ISignal, Signal } from '@lumino/signaling'; 8 | import { ReadonlyPartialJSONObject } from '@lumino/coreutils'; 9 | import { JSONSchema7 } from 'json-schema'; 10 | import { ISecretsManager } from 'jupyter-secrets-manager'; 11 | 12 | import { IBaseCompleter } from './base-completer'; 13 | import { getSecretId, SECRETS_REPLACEMENT } from './settings'; 14 | import { 15 | IAIProvider, 16 | IAIProviderRegistry, 17 | IDict, 18 | ISetProviderOptions, 19 | PLUGIN_IDS 20 | } from './tokens'; 21 | import { AIChatModel, AICompleter } from './types/ai-model'; 22 | 23 | const SECRETS_NAMESPACE = PLUGIN_IDS.providerRegistry; 24 | 25 | export const chatSystemPrompt = ( 26 | options: AIProviderRegistry.IPromptOptions 27 | ) => ` 28 | You are Jupyternaut, a conversational assistant living in JupyterLab to help 
users. 29 | You are not a language model, but rather an application built on a foundation model from ${options.provider_name}. 30 | You are talkative and you provide lots of specific details from the foundation model's context. 31 | You may use Markdown to format your response. 32 | If your response includes code, they must be enclosed in Markdown fenced code blocks (with triple backticks before and after). 33 | If your response includes mathematical notation, they must be expressed in LaTeX markup and enclosed in LaTeX delimiters. 34 | All dollar quantities (of USD) must be formatted in LaTeX, with the \`$\` symbol escaped by a single backslash \`\\\`. 35 | - Example prompt: \`If I have \\\\$100 and spend \\\\$20, how much money do I have left?\` 36 | - **Correct** response: \`You have \\(\\$80\\) remaining.\` 37 | - **Incorrect** response: \`You have $80 remaining.\` 38 | If you do not know the answer to a question, answer truthfully by responding that you do not know. 39 | The following is a friendly conversation between you and a human. 40 | `; 41 | 42 | export const COMPLETION_SYSTEM_PROMPT = ` 43 | You are an application built to provide helpful code completion suggestions. 44 | You should only produce code. Keep comments to minimum, use the 45 | programming language comment syntax. Produce clean code. 46 | The code is written in JupyterLab, a data analysis and code development 47 | environment which can execute code extended with additional syntax for 48 | interactive features, such as magics. 49 | Only give raw strings back, do not format the response using backticks. 50 | The output should be a single string, and should only contain the code that will complete the 51 | give code passed as input, no explanation whatsoever. 52 | Do not include the prompt in the output, only the string that should be appended to the current input. 
178 | * Get the current chat error.
false) { 129 | // Expose the full chat model if expected. 130 | return chatModel as AIChatModel; 131 | } 132 | 133 | // Otherwise, we create a reduced AIChatModel interface. 134 | return { 135 | stream: (input: any, options?: any) => chatModel.stream(input, options) 136 | }; 137 | } 138 | 139 | /** 140 | * Get the settings schema of a given provider. 141 | */ 142 | getSettingsSchema(provider: string): JSONSchema7 { 143 | return (Private.providers.get(provider)?.settingsSchema?.properties || 144 | {}) as JSONSchema7; 145 | } 146 | 147 | /** 148 | * Get the instructions of a given provider. 149 | */ 150 | getInstructions(provider: string): string | undefined { 151 | return Private.providers.get(provider)?.instructions; 152 | } 153 | 154 | /** 155 | * Get the compatibility check function of a given provider. 156 | */ 157 | getCompatibilityCheck( 158 | provider: string 159 | ): (() => Promise) | undefined { 160 | return Private.providers.get(provider)?.compatibilityCheck; 161 | } 162 | 163 | /** 164 | * Format an error message from the current provider. 165 | */ 166 | formatErrorMessage(error: any): string { 167 | const currentProvider = Private.providers.get(Private.getName()) ?? null; 168 | if (currentProvider?.errorMessage) { 169 | return currentProvider?.errorMessage(error); 170 | } 171 | if (error.message) { 172 | return error.message; 173 | } 174 | return error; 175 | } 176 | 177 | /** 178 | * Get the current chat error; 179 | */ 180 | get chatError(): string { 181 | return this._chatError; 182 | } 183 | 184 | /** 185 | * Get the current completer error. 186 | */ 187 | get completerError(): string { 188 | return this._completerError; 189 | } 190 | 191 | /** 192 | * Set the providers (chat model and completer). 193 | * Creates the providers if the name has changed, otherwise only updates their config. 194 | * 195 | * @param options - An object with the name and the settings of the provider to use. 
196 | */ 197 | async setProvider(options: ISetProviderOptions): Promise { 198 | const { name, settings } = options; 199 | const currentProvider = Private.providers.get(name) ?? null; 200 | if (currentProvider === null) { 201 | // The current provider may not be loaded when the settings are first loaded. 202 | // Let's defer the provider loading. 203 | this._deferredProvider = options; 204 | } else { 205 | this._deferredProvider = null; 206 | } 207 | 208 | const compatibilityCheck = this.getCompatibilityCheck(name); 209 | if (compatibilityCheck !== undefined) { 210 | const error = await compatibilityCheck(); 211 | if (error !== null) { 212 | this._chatError = error.trim(); 213 | this._completerError = error.trim(); 214 | Private.setName('None'); 215 | this._providerChanged.emit(); 216 | return; 217 | } 218 | } 219 | 220 | if (name === 'None') { 221 | this._chatError = ''; 222 | this._completerError = ''; 223 | } 224 | 225 | // Build a new settings object containing the secrets. 226 | const fullSettings: IDict = {}; 227 | for (const key of Object.keys(settings)) { 228 | if (settings[key] === SECRETS_REPLACEMENT) { 229 | const id = getSecretId(name, key); 230 | const secrets = await this._secretsManager?.get( 231 | Private.getToken(), 232 | SECRETS_NAMESPACE, 233 | id 234 | ); 235 | if (secrets !== undefined) { 236 | fullSettings[key] = secrets.value; 237 | } 238 | continue; 239 | } 240 | fullSettings[key] = settings[key]; 241 | } 242 | 243 | if (currentProvider?.completer !== undefined) { 244 | try { 245 | Private.setCompleter( 246 | new currentProvider.completer({ 247 | settings: fullSettings 248 | }) 249 | ); 250 | this._completerError = ''; 251 | } catch (e: any) { 252 | this._completerError = e.message; 253 | } 254 | } else { 255 | Private.setCompleter(null); 256 | } 257 | 258 | if (currentProvider?.chatModel !== undefined) { 259 | try { 260 | Private.setChatModel( 261 | new currentProvider.chatModel({ 262 | ...fullSettings 263 | }) 264 | ); 265 | this._chatError 
= ''; 266 | } catch (e: any) { 267 | this._chatError = e.message; 268 | Private.setChatModel(null); 269 | } 270 | } else { 271 | Private.setChatModel(null); 272 | } 273 | Private.setName(name); 274 | this._providerChanged.emit(); 275 | } 276 | 277 | /** 278 | * A signal emitting when the provider or its settings has changed. 279 | */ 280 | get providerChanged(): ISignal { 281 | return this._providerChanged; 282 | } 283 | 284 | private _secretsManager: ISecretsManager | null; 285 | private _providerChanged = new Signal(this); 286 | private _chatError: string = ''; 287 | private _completerError: string = ''; 288 | private _deferredProvider: ISetProviderOptions | null = null; 289 | } 290 | 291 | export namespace AIProviderRegistry { 292 | /** 293 | * The options for the LLM provider. 294 | */ 295 | export interface IOptions { 296 | /** 297 | * The secrets manager used in the application. 298 | */ 299 | secretsManager?: ISecretsManager; 300 | /** 301 | * The token used to request the secrets manager. 302 | */ 303 | token: symbol; 304 | } 305 | 306 | /** 307 | * The options for the Chat system prompt. 308 | */ 309 | export interface IPromptOptions { 310 | /** 311 | * The provider name. 312 | */ 313 | provider_name: string; 314 | } 315 | 316 | /** 317 | * This function indicates whether a key is writable in an object. 318 | * https://stackoverflow.com/questions/54724875/can-we-check-whether-property-is-readonly-in-typescript 319 | * 320 | * @param obj - An object extending the BaseLanguageModel interface. 321 | * @param key - A string as a key of the object. 322 | * @returns a boolean whether the key is writable or not. 323 | */ 324 | export function isWritable( 325 | obj: T, 326 | key: keyof T 327 | ) { 328 | const desc = 329 | Object.getOwnPropertyDescriptor(obj, key) || 330 | Object.getOwnPropertyDescriptor(Object.getPrototypeOf(obj), key) || 331 | {}; 332 | return Boolean(desc.writable); 333 | } 334 | 335 | /** 336 | * Update the config of a language model. 
340 | * @param settings - the configuration as a JSON object.
402 | */ 403 | let completer: IBaseCompleter | null = null; 404 | export function setCompleter(model: IBaseCompleter | null): void { 405 | completer = model; 406 | } 407 | export function getCompleter(): IBaseCompleter | null { 408 | return completer; 409 | } 410 | } 411 | -------------------------------------------------------------------------------- /src/settings/base.json: -------------------------------------------------------------------------------- 1 | { 2 | "title": "AI provider", 3 | "description": "Provider settings", 4 | "type": "object", 5 | "properties": {}, 6 | "additionalProperties": false 7 | } 8 | -------------------------------------------------------------------------------- /src/settings/index.ts: -------------------------------------------------------------------------------- 1 | export * from './panel'; 2 | export * from './utils'; 3 | -------------------------------------------------------------------------------- /src/settings/panel.tsx: -------------------------------------------------------------------------------- 1 | import { IRenderMimeRegistry } from '@jupyterlab/rendermime'; 2 | import { ISettingRegistry } from '@jupyterlab/settingregistry'; 3 | import { 4 | Button, 5 | FormComponent, 6 | IFormRenderer 7 | } from '@jupyterlab/ui-components'; 8 | import { JSONExt } from '@lumino/coreutils'; 9 | import { IChangeEvent } from '@rjsf/core'; 10 | import type { FieldProps } from '@rjsf/utils'; 11 | import validator from '@rjsf/validator-ajv8'; 12 | import { JSONSchema7 } from 'json-schema'; 13 | import { ISecretsManager } from 'jupyter-secrets-manager'; 14 | import React from 'react'; 15 | 16 | import { getSecretId, SECRETS_REPLACEMENT } from '.'; 17 | import baseSettings from './base.json'; 18 | import { IAIProviderRegistry, IDict, PLUGIN_IDS } from '../tokens'; 19 | 20 | const MD_MIME_TYPE = 'text/markdown'; 21 | const STORAGE_NAME = '@jupyterlite/ai:settings'; 22 | const INSTRUCTION_CLASS = 'jp-AISettingsInstructions'; 23 | const 
ERROR_CLASS = 'jp-AISettingsError'; 24 | const SECRETS_NAMESPACE = PLUGIN_IDS.providerRegistry; 25 | 26 | export const aiSettingsRenderer = (options: { 27 | providerRegistry: IAIProviderRegistry; 28 | secretsToken?: symbol; 29 | rmRegistry?: IRenderMimeRegistry; 30 | secretsManager?: ISecretsManager; 31 | }): IFormRenderer => { 32 | const { secretsToken } = options; 33 | delete options.secretsToken; 34 | if (secretsToken) { 35 | Private.setToken(secretsToken); 36 | } 37 | return { 38 | fieldRenderer: (props: FieldProps) => { 39 | props.formContext = { ...props.formContext, ...options }; 40 | return ; 41 | } 42 | }; 43 | }; 44 | 45 | export interface ISettingsFormStates { 46 | schema: JSONSchema7; 47 | instruction: HTMLElement | null; 48 | compatibilityError: string | null; 49 | isModified?: boolean; 50 | } 51 | 52 | const WrappedFormComponent = (props: any): JSX.Element => { 53 | return ; 54 | }; 55 | 56 | export class AiSettings extends React.Component< 57 | FieldProps, 58 | ISettingsFormStates 59 | > { 60 | constructor(props: FieldProps) { 61 | super(props); 62 | if (!props.formContext.providerRegistry) { 63 | throw new Error( 64 | 'The provider registry is needed to enable the jupyterlite-ai settings panel' 65 | ); 66 | } 67 | this._providerRegistry = props.formContext.providerRegistry; 68 | this._rmRegistry = props.formContext.rmRegistry ?? null; 69 | this._secretsManager = props.formContext.secretsManager ?? null; 70 | this._settings = props.formContext.settings; 71 | 72 | const useSecretsManagerSetting = 73 | (this._settings.get('UseSecretsManager').composite as boolean) ?? true; 74 | this._useSecretsManager = 75 | useSecretsManagerSetting && this._secretsManager !== null; 76 | 77 | // Initialize the providers schema. 
78 | const providerSchema = JSONExt.deepCopy(baseSettings) as any; 79 | providerSchema.properties.provider = { 80 | type: 'string', 81 | title: 'Provider', 82 | description: 'The AI provider to use for chat and completion', 83 | default: 'None', 84 | enum: ['None'].concat(this._providerRegistry.providers) 85 | }; 86 | this._providerSchema = providerSchema as JSONSchema7; 87 | 88 | // Check if there is saved values in local storage, otherwise use the settings from 89 | // the setting registry (leads to default if there are no user settings). 90 | const storageSettings = localStorage.getItem(STORAGE_NAME); 91 | if (storageSettings === null) { 92 | const labSettings = this._settings.get('AIprovider').composite; 93 | if (labSettings && Object.keys(labSettings).includes('provider')) { 94 | // Get the provider name. 95 | const provider = Object.entries(labSettings).find( 96 | v => v[0] === 'provider' 97 | )?.[1] as string; 98 | // Save the settings. 99 | const settings: any = { 100 | _current: provider 101 | }; 102 | settings[provider] = labSettings; 103 | localStorage.setItem(STORAGE_NAME, JSON.stringify(settings)); 104 | } 105 | } 106 | 107 | // Initialize the settings from the saved ones. 108 | this._provider = this.getCurrentProvider(); 109 | 110 | // Initialize the schema. 111 | const schema = this._buildSchema(); 112 | 113 | // Initialize the current settings. 114 | const isModified = this._updatedFormData( 115 | this.getSettingsFromLocalStorage() 116 | ); 117 | 118 | this.state = { 119 | schema, 120 | instruction: null, 121 | compatibilityError: null, 122 | isModified: isModified 123 | }; 124 | this._renderInstruction(); 125 | 126 | this._checkProviderCompatibility(); 127 | 128 | // Update the setting registry. 
129 | this.saveSettingsToRegistry(); 130 | 131 | this._secretsManager?.fieldVisibilityChanged.connect( 132 | this._fieldVisibilityChanged 133 | ); 134 | 135 | this._settings.changed.connect(this._settingsChanged); 136 | } 137 | 138 | async componentDidUpdate(): Promise { 139 | if (!this._secretsManager || !this._useSecretsManager) { 140 | return; 141 | } 142 | 143 | // Attach the password inputs to the secrets manager. 144 | await this._secretsManager.detachAll(Private.getToken(), SECRETS_NAMESPACE); 145 | const inputs = this._formRef.current?.getElementsByTagName('input') || []; 146 | for (let i = 0; i < inputs.length; i++) { 147 | if (inputs[i].type.toLowerCase() === 'password') { 148 | const label = inputs[i].getAttribute('label'); 149 | if (label) { 150 | const id = getSecretId(this._provider, label); 151 | this._secretsManager.attach( 152 | Private.getToken(), 153 | SECRETS_NAMESPACE, 154 | id, 155 | inputs[i], 156 | (value: string) => this._onPasswordUpdated(label, value) 157 | ); 158 | } 159 | } 160 | } 161 | } 162 | 163 | componentWillUnmount(): void { 164 | this._settings.changed.disconnect(this._settingsChanged); 165 | this._secretsManager?.fieldVisibilityChanged.disconnect( 166 | this._fieldVisibilityChanged 167 | ); 168 | if (!this._secretsManager || !this._useSecretsManager) { 169 | return; 170 | } 171 | this._secretsManager.detachAll(Private.getToken(), SECRETS_NAMESPACE); 172 | } 173 | 174 | /** 175 | * Get the current provider from the local storage. 176 | */ 177 | getCurrentProvider(): string { 178 | const settings = JSON.parse(localStorage.getItem(STORAGE_NAME) || '{}'); 179 | return settings['_current'] ?? 'None'; 180 | } 181 | 182 | /** 183 | * Save the current provider to the local storage. 
184 | */ 185 | saveCurrentProvider(): void { 186 | const settings = JSON.parse(localStorage.getItem(STORAGE_NAME) || '{}'); 187 | settings['_current'] = this._provider; 188 | localStorage.setItem(STORAGE_NAME, JSON.stringify(settings)); 189 | } 190 | 191 | /** 192 | * Get settings from local storage for a given provider. 193 | */ 194 | getSettingsFromLocalStorage(): IDict { 195 | const settings = JSON.parse(localStorage.getItem(STORAGE_NAME) || '{}'); 196 | return settings[this._provider] ?? { provider: this._provider }; 197 | } 198 | 199 | /** 200 | * Save settings in local storage for a given provider. 201 | */ 202 | saveSettingsToLocalStorage() { 203 | const currentSettings = { ...this._currentSettings }; 204 | const settings = JSON.parse(localStorage.getItem(STORAGE_NAME) ?? '{}'); 205 | // Do not save secrets in local storage if using the secrets manager. 206 | if (this._useSecretsManager) { 207 | this._secretFields.forEach(field => delete currentSettings[field]); 208 | } 209 | settings[this._provider] = currentSettings; 210 | localStorage.setItem(STORAGE_NAME, JSON.stringify(settings)); 211 | } 212 | 213 | /** 214 | * Save the settings to the setting registry. 215 | */ 216 | saveSettingsToRegistry(): void { 217 | const sanitizedSettings = { ...this._currentSettings }; 218 | if (this._useSecretsManager) { 219 | this._secretFields.forEach(field => { 220 | sanitizedSettings[field] = SECRETS_REPLACEMENT; 221 | }); 222 | } 223 | this._settings 224 | .set('AIprovider', { provider: this._provider, ...sanitizedSettings }) 225 | .catch(console.error); 226 | } 227 | 228 | /** 229 | * Triggered when the settings has changed. 230 | */ 231 | private _settingsChanged = (settings: ISettingRegistry.ISettings) => { 232 | this._updateUseSecretsManager( 233 | (this._settings.get('UseSecretsManager').composite as boolean) ?? true 234 | ); 235 | }; 236 | 237 | /** 238 | * Triggered when the secret fields visibility has changed. 
239 | */ 240 | private _fieldVisibilityChanged = ( 241 | _: ISecretsManager, 242 | value: boolean 243 | ): void => { 244 | if (this._useSecretsManager) { 245 | this._updateSchema(); 246 | } 247 | }; 248 | 249 | /** 250 | * Update the settings whether the secrets manager is used or not. 251 | * 252 | * @param value - whether to use the secrets manager or not. 253 | */ 254 | private _updateUseSecretsManager = (value: boolean) => { 255 | // No-op if the value did not change or the secrets manager has not been provided. 256 | if (value === this._useSecretsManager || this._secretsManager === null) { 257 | return; 258 | } 259 | 260 | // Update the secrets manager. 261 | this._useSecretsManager = value; 262 | if (!value) { 263 | // Detach all the password inputs attached to the secrets manager, and save the 264 | // current settings to the local storage to save the password. 265 | this._secretsManager.detachAll(Private.getToken(), SECRETS_NAMESPACE); 266 | } else { 267 | // Remove all the keys stored locally. 268 | const settings = JSON.parse(localStorage.getItem(STORAGE_NAME) || '{}'); 269 | Object.keys(settings).forEach(provider => { 270 | Object.keys(settings[provider]) 271 | .filter(key => key.toLowerCase().includes('key')) 272 | .forEach(key => { 273 | delete settings[provider][key]; 274 | }); 275 | }); 276 | localStorage.setItem(STORAGE_NAME, JSON.stringify(settings)); 277 | } 278 | this._updateSchema(); 279 | this.saveSettingsToLocalStorage(); 280 | this.saveSettingsToRegistry(); 281 | }; 282 | 283 | /** 284 | * Build the schema for a given provider. 
285 | */ 286 | private _buildSchema(): JSONSchema7 { 287 | const schema = JSONExt.deepCopy(baseSettings) as any; 288 | this._uiSchema = {}; 289 | const settingsSchema = this._providerRegistry.getSettingsSchema( 290 | this._provider 291 | ); 292 | 293 | this._secretFields = []; 294 | this._defaultFormData = {}; 295 | if (settingsSchema) { 296 | Object.entries(settingsSchema).forEach(([key, value]) => { 297 | if (key.toLowerCase().includes('key')) { 298 | this._secretFields.push(key); 299 | 300 | // If the secrets manager is not used, do not show the secrets fields. 301 | // If the secrets manager is used, check if the fields should be visible. 302 | const showSecretFields = 303 | !this._useSecretsManager || 304 | (this._secretsManager?.secretFieldsVisibility ?? true); 305 | if (!showSecretFields) { 306 | return; 307 | } 308 | 309 | this._uiSchema[key] = { 'ui:widget': 'password' }; 310 | } 311 | schema.properties[key] = value; 312 | if (value.default !== undefined) { 313 | this._defaultFormData[key] = value.default; 314 | } 315 | }); 316 | } 317 | 318 | return schema as JSONSchema7; 319 | } 320 | 321 | /** 322 | * Update the schema state for the given provider, that trigger the re-rendering of 323 | * the component. 324 | */ 325 | private _updateSchema() { 326 | const schema = this._buildSchema(); 327 | this.setState({ schema }); 328 | } 329 | 330 | /** 331 | * Render the markdown instructions for the current provider. 
332 | */ 333 | private async _renderInstruction(): Promise { 334 | let instructions = this._providerRegistry.getInstructions(this._provider); 335 | if (!this._rmRegistry || !instructions) { 336 | this.setState({ instruction: null }); 337 | return; 338 | } 339 | instructions = `---\n\n${instructions}\n\n---`; 340 | const renderer = this._rmRegistry.createRenderer(MD_MIME_TYPE); 341 | const model = this._rmRegistry.createModel({ 342 | data: { [MD_MIME_TYPE]: instructions } 343 | }); 344 | await renderer.renderModel(model); 345 | this.setState({ instruction: renderer.node }); 346 | } 347 | 348 | /** 349 | * Check for compatibility of the provider with the current environment. 350 | * If the provider is not compatible, display an error message. 351 | */ 352 | private async _checkProviderCompatibility(): Promise { 353 | const compatibilityCheck = this._providerRegistry.getCompatibilityCheck( 354 | this._provider 355 | ); 356 | if (!compatibilityCheck) { 357 | this.setState({ compatibilityError: null }); 358 | return; 359 | } 360 | const error = await compatibilityCheck(); 361 | if (!error) { 362 | this.setState({ compatibilityError: null }); 363 | return; 364 | } 365 | const errorDiv = document.createElement('div'); 366 | errorDiv.className = ERROR_CLASS; 367 | errorDiv.innerHTML = error; 368 | this.setState({ compatibilityError: error }); 369 | } 370 | 371 | /** 372 | * Triggered when the provider has changed, to update the schema and values. 373 | * Update the Jupyterlab settings accordingly. 374 | */ 375 | private _onProviderChanged = (e: IChangeEvent) => { 376 | const provider = e.formData.provider; 377 | if (provider === this._currentSettings.provider) { 378 | return; 379 | } 380 | this._provider = provider; 381 | this.saveCurrentProvider(); 382 | this._updateSchema(); 383 | this._renderInstruction(); 384 | this._checkProviderCompatibility(); 385 | 386 | // Initialize the current settings. 
387 | const isModified = this._updatedFormData( 388 | this.getSettingsFromLocalStorage() 389 | ); 390 | if (isModified !== this.state.isModified) { 391 | this.setState({ isModified }); 392 | } 393 | this.saveSettingsToRegistry(); 394 | }; 395 | 396 | /** 397 | * Callback function called when the password input has been programmatically updated 398 | * with the secret manager. 399 | */ 400 | private _onPasswordUpdated = (fieldName: string, value: string) => { 401 | this._currentSettings[fieldName] = value; 402 | this.saveSettingsToRegistry(); 403 | }; 404 | 405 | /** 406 | * Update the current settings with the new values from the form. 407 | * 408 | * @param data - The form data to update. 409 | * @returns - Boolean whether the form is not the default one. 410 | */ 411 | private _updatedFormData(data: IDict): boolean { 412 | let isModified = false; 413 | Object.entries(data).forEach(([key, value]) => { 414 | if (this._defaultFormData[key] !== undefined) { 415 | if (value === undefined) { 416 | const schemaProperty = this.state.schema.properties?.[ 417 | key 418 | ] as JSONSchema7; 419 | if (schemaProperty.type === 'string') { 420 | data[key] = ''; 421 | } 422 | } 423 | if (value !== this._defaultFormData[key]) { 424 | isModified = true; 425 | } 426 | } 427 | }); 428 | this._currentSettings = JSONExt.deepCopy(data); 429 | return isModified; 430 | } 431 | 432 | /** 433 | * Triggered when the form value has changed, to update the current settings and save 434 | * it in local storage. 435 | * Update the Jupyterlab settings accordingly. 
436 | */ 437 | private _onFormChanged = (e: IChangeEvent): void => { 438 | const { formData } = e; 439 | const isModified = this._updatedFormData(formData); 440 | this.saveSettingsToLocalStorage(); 441 | this.saveSettingsToRegistry(); 442 | if (isModified !== this.state.isModified) { 443 | this.setState({ isModified }); 444 | } 445 | }; 446 | 447 | /** 448 | * Handler for the "Restore to defaults" button - clears all 449 | * modified settings then calls `setFormData` to restore the 450 | * values. 451 | */ 452 | private _reset = async (event: React.MouseEvent): Promise => { 453 | event.stopPropagation(); 454 | this._currentSettings = { 455 | ...this._currentSettings, 456 | ...this._defaultFormData 457 | }; 458 | this.saveSettingsToLocalStorage(); 459 | this.saveSettingsToRegistry(); 460 | this.setState({ isModified: false }); 461 | }; 462 | 463 | render(): JSX.Element { 464 | return ( 465 |
466 | 471 | {this.state.compatibilityError !== null && ( 472 |
473 | 474 | {this.state.compatibilityError} 475 |
476 | )} 477 | {this.state.instruction !== null && ( 478 |
479 | Instructions 480 | 482 | node && node.replaceChildren(this.state.instruction!) 483 | } 484 | /> 485 |
486 | )} 487 |
488 |

{this._provider}

489 |
490 | {this.state.isModified && ( 491 | 494 | )} 495 |
496 |
497 | 508 |
509 | ); 510 | } 511 | 512 | private _providerRegistry: IAIProviderRegistry; 513 | private _provider: string; 514 | private _providerSchema: JSONSchema7; 515 | private _useSecretsManager: boolean; 516 | private _rmRegistry: IRenderMimeRegistry | null; 517 | private _secretsManager: ISecretsManager | null; 518 | private _currentSettings: IDict = { provider: 'None' }; 519 | private _uiSchema: IDict = {}; 520 | private _settings: ISettingRegistry.ISettings; 521 | private _formRef = React.createRef(); 522 | private _secretFields: string[] = []; 523 | private _defaultFormData: IDict = {}; 524 | } 525 | 526 | namespace Private { 527 | /** 528 | * The token to use with the secrets manager. 529 | */ 530 | let secretsToken: symbol; 531 | 532 | /** 533 | * Set of the token. 534 | */ 535 | export function setToken(value: symbol): void { 536 | secretsToken = value; 537 | } 538 | 539 | /** 540 | * get the token. 541 | */ 542 | export function getToken(): symbol { 543 | return secretsToken; 544 | } 545 | } 546 | -------------------------------------------------------------------------------- /src/settings/utils.ts: -------------------------------------------------------------------------------- 1 | export const SECRETS_REPLACEMENT = '***'; 2 | 3 | export function getSecretId(provider: string, label: string) { 4 | return `${provider}-${label}`; 5 | } 6 | -------------------------------------------------------------------------------- /src/tokens.ts: -------------------------------------------------------------------------------- 1 | import { BaseChatModel } from '@langchain/core/language_models/chat_models'; 2 | import { ReadonlyPartialJSONObject, Token } from '@lumino/coreutils'; 3 | import { ISignal } from '@lumino/signaling'; 4 | import { JSONSchema7 } from 'json-schema'; 5 | 6 | import { IBaseCompleter } from './base-completer'; 7 | import { AIChatModel, AICompleter } from './types/ai-model'; 8 | 9 | export const PLUGIN_IDS = { 10 | chat: '@jupyterlite/ai:chat', 11 | 
chatCommandRegistry: '@jupyterlite/ai:autocompletion-registry', 12 | completer: '@jupyterlite/ai:completer', 13 | providerRegistry: '@jupyterlite/ai:provider-registry', 14 | settingsConnector: '@jupyterlite/ai:settings-connector' 15 | }; 16 | 17 | export interface IDict { 18 | [key: string]: T; 19 | } 20 | 21 | export interface IType { 22 | new (...args: any[]): T; 23 | } 24 | 25 | /** 26 | * The provider interface. 27 | */ 28 | export interface IAIProvider { 29 | /** 30 | * The name of the provider. 31 | */ 32 | name: string; 33 | /** 34 | * The chat model class to use. 35 | */ 36 | chatModel?: IType; 37 | /** 38 | * The completer class to use. 39 | */ 40 | completer?: IType; 41 | /** 42 | * the settings schema for the provider. 43 | */ 44 | settingsSchema?: any; 45 | /** 46 | * The instructions to be displayed in the settings, as helper to use the provider. 47 | * A markdown renderer is used to render the instructions. 48 | */ 49 | instructions?: string; 50 | /** 51 | * A function that extract the error message from the provider API error. 52 | * Default to `(error) => error.message`. 53 | */ 54 | errorMessage?: (error: any) => string; 55 | /** 56 | * Compatibility check function, to determine if the provider is compatible with the 57 | * current environment. 58 | */ 59 | compatibilityCheck?: () => Promise; 60 | /** 61 | * Whether to expose or not the chat model. 62 | * 63 | * ### CAUTION 64 | * This flag will expose the whole chat model API, which may contain private keys. 65 | * Be sure to use it with a model that does not expose sensitive information in the 66 | * API. 67 | */ 68 | exposeChatModel?: boolean; 69 | } 70 | 71 | /** 72 | * The provider registry interface. 73 | */ 74 | export interface IAIProviderRegistry { 75 | /** 76 | * Get the list of provider names. 77 | */ 78 | readonly providers: string[]; 79 | /** 80 | * Add a new provider. 81 | */ 82 | add(provider: IAIProvider): void; 83 | /** 84 | * Get the current provider name. 
85 | */ 86 | currentName: string; 87 | /** 88 | * Get the current completer of the completion provider. 89 | */ 90 | currentCompleter: AICompleter | null; 91 | /** 92 | * Get the current llm chat model. 93 | */ 94 | currentChatModel: AIChatModel | null; 95 | /** 96 | * Get the settings schema of a given provider. 97 | */ 98 | getSettingsSchema(provider: string): JSONSchema7; 99 | /** 100 | * Get the instructions of a given provider. 101 | */ 102 | getInstructions(provider: string): string | undefined; 103 | /** 104 | * Get the compatibility check function of a given provider. 105 | */ 106 | getCompatibilityCheck( 107 | provider: string 108 | ): (() => Promise) | undefined; 109 | /** 110 | * Format an error message from the current provider. 111 | */ 112 | formatErrorMessage(error: any): string; 113 | /** 114 | * Set the providers (chat model and completer). 115 | * Creates the providers if the name has changed, otherwise only updates their config. 116 | * 117 | * @param options - an object with the name and the settings of the provider to use. 118 | */ 119 | setProvider(options: ISetProviderOptions): void; 120 | /** 121 | * A signal emitting when the provider or its settings has changed. 122 | */ 123 | readonly providerChanged: ISignal; 124 | /** 125 | * Get the current chat error; 126 | */ 127 | readonly chatError: string; 128 | /** 129 | * get the current completer error. 130 | */ 131 | readonly completerError: string; 132 | } 133 | 134 | /** 135 | * The set provider options. 136 | */ 137 | export interface ISetProviderOptions { 138 | /** 139 | * The name of the provider. 140 | */ 141 | name: string; 142 | /** 143 | * The settings of the provider. 144 | */ 145 | settings: ReadonlyPartialJSONObject; 146 | } 147 | 148 | /** 149 | * The provider registry token. 
150 | */ 151 | export const IAIProviderRegistry = new Token( 152 | '@jupyterlite/ai:provider-registry', 153 | 'Provider for chat and completion LLM provider' 154 | ); 155 | -------------------------------------------------------------------------------- /src/types/ai-model.ts: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) Jupyter Development Team. 3 | * Distributed under the terms of the Modified BSD License. 4 | */ 5 | 6 | import { 7 | CompletionHandler, 8 | IInlineCompletionContext 9 | } from '@jupyterlab/completer'; 10 | import { IterableReadableStream } from '@langchain/core/utils/stream'; 11 | 12 | /** 13 | * The reduced AI chat model interface. 14 | */ 15 | export type AIChatModel = { 16 | /** 17 | * The stream function of the chat model. 18 | */ 19 | stream: (input: any, options?: any) => Promise>; 20 | }; 21 | 22 | /** 23 | * The reduced AI completer interface. 24 | */ 25 | export type AICompleter = { 26 | /** 27 | * The fetch function of the completer. 28 | */ 29 | fetch: ( 30 | request: CompletionHandler.IRequest, 31 | context: IInlineCompletionContext 32 | ) => Promise; 33 | /** 34 | * The optional request completion function of the completer. 
35 | */ 36 | requestCompletion?: () => void; 37 | }; 38 | -------------------------------------------------------------------------------- /src/types/service-worker.d.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Type declarations for Service Worker APIs not included in standard TypeScript libs 3 | */ 4 | interface ExtendableMessageEvent extends MessageEvent { 5 | waitUntil(f: Promise): void; 6 | } 7 | -------------------------------------------------------------------------------- /style/base.css: -------------------------------------------------------------------------------- 1 | /* 2 | See the JupyterLab Developer Guide for useful CSS Patterns: 3 | 4 | https://jupyterlab.readthedocs.io/en/stable/developer/css.html 5 | */ 6 | 7 | @import url('@jupyter/chat/style/index.css'); 8 | 9 | .jp-AISettingsInstructions { 10 | font-size: var(--jp-content-font-size1); 11 | } 12 | 13 | .jp-AISettingsError { 14 | color: var(--jp-error-color1); 15 | font-size: var(--jp-content-font-size1); 16 | } 17 | -------------------------------------------------------------------------------- /style/icons/jupyternaut-lite.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | -------------------------------------------------------------------------------- /style/index.css: -------------------------------------------------------------------------------- 1 | @import url('base.css'); 2 | -------------------------------------------------------------------------------- /style/index.js: -------------------------------------------------------------------------------- 1 | import './base.css'; 2 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "allowSyntheticDefaultImports": true, 4 | "composite": true, 5 | "declaration": 
true, 6 | "esModuleInterop": true, 7 | "incremental": true, 8 | "jsx": "react", 9 | "lib": ["ES2022", "DOM"], 10 | "module": "esnext", 11 | "moduleResolution": "node", 12 | "noEmitOnError": true, 13 | "noImplicitAny": true, 14 | "noUnusedLocals": true, 15 | "paths": { 16 | "ollama/browser": ["./node_modules/@langchain/ollama/dist"] 17 | }, 18 | "preserveWatchOutput": true, 19 | "resolveJsonModule": true, 20 | "outDir": "lib", 21 | "rootDir": "src", 22 | "strict": true, 23 | "strictNullChecks": true, 24 | "target": "ES2022", 25 | "typeRoots": ["./node_modules/@types", "./src/types"] 26 | }, 27 | "include": [ 28 | "src/*", 29 | "src/**/*", 30 | "src/settings/base.json", 31 | "src/default-providers/**/*.json" 32 | ] 33 | } 34 | --------------------------------------------------------------------------------