├── .env.example ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md └── workflows │ └── release.yml ├── .gitignore ├── .ignore ├── Dockerfile ├── LICENSE ├── README.md ├── assets ├── example.gif ├── example2.gif └── template.md ├── bun.lockb ├── bunfig.toml ├── docker-compose.yml ├── package.json ├── src ├── app.ts ├── config.ts ├── constants.ts ├── events │ ├── actions.ts │ ├── completions.ts │ └── index.ts ├── models │ ├── api.ts │ ├── assistant.ts │ ├── codeium-auth.ts │ ├── copilot-auth.ts │ ├── lsp.test.ts │ ├── lsp.ts │ └── lsp.types.ts ├── providers │ ├── codeium.test.ts │ ├── codeium.ts │ ├── codeium.types.ts │ ├── github.test.ts │ ├── github.ts │ ├── github.types.ts │ ├── ollama.test.ts │ ├── ollama.ts │ ├── ollama.types.ts │ ├── openai.test.ts │ ├── openai.ts │ └── openai.types.ts ├── utils.test.ts └── utils.ts └── tsconfig.json /.env.example: -------------------------------------------------------------------------------- 1 | OPENAI_MODEL=gpt-3.5-turbo-16k 2 | OPENAI_MAX_TOKENS=8096 3 | OPENAI_API_KEY= 4 | LOG_FILE=/app/helix-gpt.log 5 | OPENAI_CONTEXT= 6 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: "[BUG]" 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **helix-editor version** 11 | Exact version of the Helix editor you are using.. 12 | 13 | **helix-gpt version** 14 | Exact version of Helix GPT. 15 | 16 | **Describe the bug** 17 | A clear and concise description of what the bug is. 18 | 19 | **helix-gpt logs** 20 | Helix GPT output logs. Pass `--logFile /app/helix-gpt.log` to save them. 21 | 22 | **helix logs** 23 | Helix editor output logs. Ensure you start helix in verbose mode `hx -v example.file` 24 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release 2 | on: 3 | workflow_dispatch: 4 | inputs: 5 | tag: 6 | type: string 7 | description: What is the release tag? 
8 | required: true 9 | jobs: 10 | build: 11 | name: build 12 | runs-on: ubuntu-latest 13 | steps: 14 | - name: Checkout sources 15 | uses: actions/checkout@v4 16 | with: 17 | fetch-tags: true 18 | fetch-depth: 0 19 | token: ${{ secrets.GH_TOKEN }} 20 | 21 | - uses: oven-sh/setup-bun@v1 22 | with: 23 | bun-version: latest 24 | 25 | - name: Install dependencies 26 | run: bun install 27 | 28 | - name: Run tests 29 | run: bun run test 30 | 31 | - name: Build binary 32 | run: bun run build:bin 33 | 34 | - name: Build smol 35 | run: bun run build:smol 36 | 37 | - name: Set release 38 | shell: bash 39 | run: | 40 | export VERSION="${{ github.event.inputs.tag }}" 41 | export ARCH=x86_64-linux 42 | export APP=helix-gpt 43 | export OUTPUT="$APP-$VERSION-$ARCH" 44 | 45 | mv dist/helix-gpt dist/$OUTPUT 46 | mv dist/helix-gpt.js "dist/$APP-$VERSION.js" 47 | 48 | - name: Tag release 49 | run: | 50 | git config --global user.name "Leon" 51 | git config --global user.email "leon@nx.ie" 52 | sed -e 's//${{ github.event.inputs.tag }}/g' assets/template.md > README.md 53 | git add README.md 54 | git commit -m 'release: version ${{ github.event.inputs.tag }}' 55 | git push origin master 56 | latest_tag=$(git describe --tags `git rev-list --tags --max-count=1`) 57 | messages=$(git log $latest_tag..HEAD --pretty=format:"%h - %s") 58 | git tag -m "$messages" -a ${{ github.event.inputs.tag }} 59 | git push origin ${{ github.event.inputs.tag }} 60 | 61 | - uses: actions/upload-artifact@v4 62 | if: vars.RUNNER != 'act' 63 | with: 64 | name: bins 65 | path: dist 66 | 67 | publish: 68 | name: Publish 69 | needs: [build] 70 | runs-on: ubuntu-latest 71 | steps: 72 | - name: Checkout sources 73 | uses: actions/checkout@v4 74 | 75 | - uses: actions/download-artifact@v4 76 | 77 | - name: Build archive 78 | shell: bash 79 | run: | 80 | set -ex 81 | 82 | source="$(pwd)" 83 | cd "$(mktemp -d)" 84 | mv $source/bins* . 85 | mkdir dist 86 | 87 | for bin in bins/*; do 88 | filename=$(basename ${bin}) 89 | 90 | if [[ "$bin" == *.js ]]; then 91 | mv $bin dist/$filename 92 | continue 93 | fi 94 | 95 | tar -C `dirname $bin` -czvf dist/$filename.tar.gz --transform 's,^.*/,,g' `basename $bin` 96 | done 97 | 98 | tar -czvf dist/helix-gpt-${{ github.event.inputs.tag }}-source.tar.gz -C $source . 
99 | mv dist $source/ 100 | 101 | - name: Upload binaries to release 102 | if: vars.RUNNER != 'act' 103 | uses: svenstaro/upload-release-action@v2 104 | with: 105 | repo_token: ${{ secrets.GH_TOKEN }} 106 | file: dist/* 107 | file_glob: true 108 | tag: ${{ github.event.inputs.tag }} 109 | release_name: "${{ github.event.inputs.tag }}" 110 | overwrite: true 111 | 112 | - name: Upload binaries as artifact 113 | if: vars.RUNNER != 'act' 114 | uses: actions/upload-artifact@v4 115 | with: 116 | name: release 117 | path: dist/* 118 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Based on https://raw.githubusercontent.com/github/gitignore/main/Node.gitignore 2 | 3 | # Logs 4 | 5 | logs 6 | _.log 7 | npm-debug.log_ 8 | yarn-debug.log* 9 | yarn-error.log* 10 | lerna-debug.log* 11 | .pnpm-debug.log* 12 | 13 | # Caches 14 | 15 | .cache 16 | 17 | # Diagnostic reports (https://nodejs.org/api/report.html) 18 | 19 | report.[0-9]_.[0-9]_.[0-9]_.[0-9]_.json 20 | 21 | # Runtime data 22 | 23 | pids 24 | _.pid 25 | _.seed 26 | *.pid.lock 27 | 28 | # Directory for instrumented libs generated by jscoverage/JSCover 29 | 30 | lib-cov 31 | 32 | # Coverage directory used by tools like istanbul 33 | 34 | coverage 35 | *.lcov 36 | 37 | # nyc test coverage 38 | 39 | .nyc_output 40 | 41 | # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) 42 | 43 | .grunt 44 | 45 | # Bower dependency directory (https://bower.io/) 46 | 47 | bower_components 48 | 49 | # node-waf configuration 50 | 51 | .lock-wscript 52 | 53 | # Compiled binary addons (https://nodejs.org/api/addons.html) 54 | 55 | build/Release 56 | 57 | # Dependency directories 58 | 59 | node_modules/ 60 | jspm_packages/ 61 | 62 | # Snowpack dependency directory (https://snowpack.dev/) 63 | 64 | web_modules/ 65 | 66 | # TypeScript cache 67 | 68 | *.tsbuildinfo 69 | 70 | # Optional npm cache directory 71 | 72 | .npm 73 | 74 | # Optional eslint cache 75 | 76 | .eslintcache 77 | 78 | # Optional stylelint cache 79 | 80 | .stylelintcache 81 | 82 | # Microbundle cache 83 | 84 | .rpt2_cache/ 85 | .rts2_cache_cjs/ 86 | .rts2_cache_es/ 87 | .rts2_cache_umd/ 88 | 89 | # Optional REPL history 90 | 91 | .node_repl_history 92 | 93 | # Output of 'npm pack' 94 | 95 | *.tgz 96 | 97 | # Yarn Integrity file 98 | 99 | .yarn-integrity 100 | 101 | # dotenv environment variable files 102 | 103 | .env 104 | .env.development.local 105 | .env.test.local 106 | .env.production.local 107 | .env.local 108 | 109 | # parcel-bundler cache (https://parceljs.org/) 110 | 111 | .parcel-cache 112 | 113 | # Next.js build output 114 | 115 | .next 116 | out 117 | 118 | # Nuxt.js build / generate output 119 | 120 | .nuxt 121 | 122 | # Gatsby files 123 | 124 | # Comment in the public line in if your project uses Gatsby and not Next.js 125 | 126 | # https://nextjs.org/blog/next-9-1#public-directory-support 127 | 128 | # public 129 | 130 | # vuepress build output 131 | 132 | .vuepress/dist 133 | 134 | # vuepress v2.x temp and cache directory 135 | 136 | .temp 137 | 138 | # Docusaurus cache and generated files 139 | 140 | .docusaurus 141 | 142 | # Serverless directories 143 | 144 | .serverless/ 145 | 146 | # FuseBox cache 147 | 148 | .fusebox/ 149 | 150 | # DynamoDB Local files 151 | 152 | .dynamodb/ 153 | 154 | # TernJS port file 155 | 156 | .tern-port 157 | 158 | # Stores VSCode versions used for testing VSCode extensions 159 | 160 | 
.vscode-test 161 | 162 | # yarn v2 163 | 164 | .yarn/cache 165 | .yarn/unplugged 166 | .yarn/build-state.yml 167 | .yarn/install-state.gz 168 | .pnp.* 169 | 170 | # IntelliJ based IDEs 171 | .idea 172 | 173 | # Finder (MacOS) folder config 174 | .DS_Store 175 | 176 | helix-gpt.log 177 | dist/ 178 | -------------------------------------------------------------------------------- /.ignore: -------------------------------------------------------------------------------- 1 | node_modules/ 2 | dist/ 3 | bun.lockb 4 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | ARG GLIBC_RELEASE=2.34-r0 2 | ARG release=latest 3 | 4 | FROM alpine:latest as get-latest 5 | 6 | WORKDIR /tmp 7 | # get bun latest release 8 | ADD https://github.com/oven-sh/bun/releases/latest/download/bun-linux-x64.zip bun-linux-x64.zip 9 | 10 | FROM alpine:latest as get-canary 11 | 12 | WORKDIR /tmp 13 | # get bun canary release 14 | ADD https://github.com/oven-sh/bun/releases/download/canary/bun-linux-x64.zip bun-linux-x64.zip 15 | 16 | FROM get-${release} as get-release 17 | 18 | RUN apk --no-cache add unzip 19 | RUN unzip bun-linux-x64.zip && chmod +x ./bun-linux-x64/bun 20 | 21 | # get glibc 22 | ARG GLIBC_RELEASE 23 | RUN wget https://alpine-pkgs.sgerrand.com/sgerrand.rsa.pub && \ 24 | wget https://github.com/sgerrand/alpine-pkg-glibc/releases/download/${GLIBC_RELEASE}/glibc-${GLIBC_RELEASE}.apk 25 | 26 | ### Helix build ### 27 | FROM alpine:latest as helix 28 | RUN apk --no-cache add helix alpine-sdk 29 | RUN hx --grammar fetch; exit 0 30 | RUN hx --grammar build; exit 0 31 | 32 | ### FINAL IMAGE ### 33 | FROM alpine:latest as final 34 | 35 | ARG GLIBC_RELEASE 36 | 37 | COPY --from=get-release /tmp/bun-linux-x64/bun /usr/local/bin/ 38 | COPY --from=get-release /tmp/sgerrand.rsa.pub /etc/apk/keys 39 | COPY --from=get-release /tmp/glibc-${GLIBC_RELEASE}.apk /tmp 40 | COPY --from=helix /usr/bin/hx /usr/bin/hx 41 | COPY --from=helix /usr/share/helix /usr/share/helix 42 | COPY --from=helix /root/.config/helix/runtime/grammars /usr/share/helix/runtime/grammars 43 | 44 | # install glibc 45 | RUN apk --no-cache add --force-overwrite libgcc bash /tmp/glibc-${GLIBC_RELEASE}.apk && \ 46 | # cleanup 47 | rm /etc/apk/keys/sgerrand.rsa.pub && \ 48 | rm /tmp/glibc-${GLIBC_RELEASE}.apk && \ 49 | # smoke test 50 | bun --version 51 | 52 | RUN echo -e '#!/bin/sh\n\ 53 | set -e\n\ 54 | if [ "${1#-}" != "${1}" ] || [ -z "$(command -v "${1}")" ]; then\n\ 55 | set -- bun "$@"\n\ 56 | fi\n\ 57 | exec "$@"\n ' > /entrypoint.sh 58 | 59 | ENV PATH="${PATH}:/root/.bun/bin" 60 | ADD ./.helix /root/.config/helix 61 | RUN ln -s /usr/local/bin/bun /usr/local/bin/node 62 | RUN bun install -g typescript typescript-language-server 63 | RUN chmod +x /entrypoint.sh 64 | WORKDIR /app 65 | 66 | ENTRYPOINT [ "/entrypoint.sh" ] 67 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 
10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # helix-gpt 2 | 3 | ![Build Status](https://github.com/leona/helix-gpt/actions/workflows/release.yml/badge.svg) 4 | ![Github Release](https://img.shields.io/badge/release-v0.34-blue) 5 | 6 | Code assistant language server for [Helix](https://github.com/helix-editor/helix) with support for Copilot/OpenAI/Codeium. 7 | 8 | Completion example 9 | 10 | ![helix-gpt example](https://github.com/leona/helix-gpt/raw/master/assets/example.gif) 11 | 12 | Code actions example (space + a) 13 | 14 | ![helix-gpt example](https://github.com/leona/helix-gpt/raw/master/assets/example2.gif) 15 | 16 | Available code actions: `resolveDiagnostics` `generateDocs` `improveCode` `refactorFromComment` `writeTest` 17 | 18 | ### How? 19 | 20 | When a trigger character is pressed, it will request a completion and use the entire file as context. 21 | Default trigger characters `["{", "(", " "]` can be overridden with `--triggerCharacters "{||(|| "` 22 | 23 | Use `ctrl+x` to manually trigger completions, and `space+a` to trigger code actions that only use the selected code as context. 24 | 25 | ### Install 26 | 27 | This was made to run with [Bun](https://bun.sh/), but you can also use a precompiled binary. 28 | 29 | #### Without Bun 30 | 31 | ```bash 32 | wget https://github.com/leona/helix-gpt/releases/download/0.34/helix-gpt-0.34-x86_64-linux.tar.gz \ 33 | -O /tmp/helix-gpt.tar.gz \ 34 | && tar -zxvf /tmp/helix-gpt.tar.gz \ 35 | && mv helix-gpt-0.34-x86_64-linux /usr/bin/helix-gpt \ 36 | && chmod +x /usr/bin/helix-gpt 37 | ``` 38 | 39 | #### With Bun (tested with 1.0.25) 40 | 41 | ```bash 42 | wget https://github.com/leona/helix-gpt/releases/download/0.34/helix-gpt-0.34.js -O /usr/bin/helix-gpt 43 | ``` 44 | 45 | ### Configuration 46 | 47 | You can configure helix-gpt either by setting the environment variables below or by passing the command line options directly to helix-gpt in the Helix configuration step. 48 | 49 | [All configuration options](https://github.com/leona/helix-gpt/blob/master/src/config.ts) 50 | 51 | NOTE: Copilot is the best choice due to the model and implementation. 52 | 53 | #### Environment Variables 54 | 55 | ```bash 56 | OPENAI_API_KEY=123 # Required if using openai handler 57 | COPILOT_API_KEY=123 # Required if using copilot handler 58 | CODEIUM_API_KEY=123 # Not required, will use public API key otherwise.
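# The ollama handler can also be used (HANDLER=ollama); the defaults below are taken from src/config.ts
OLLAMA_ENDPOINT=http://127.0.0.1:11434 # Optional, only used by the ollama handler
OLLAMA_MODEL=codellama # Optional, only used by the ollama handler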
59 | HANDLER=openai # openai/copilot/codeium 60 | ``` 61 | 62 | #### Command Line Arguments 63 | 64 | (Add to `command = "helix-gpt"` in Helix configuration) 65 | 66 | ```bash 67 | --handler openai --openaiKey 123 68 | ``` 69 | 70 | You can also use: 71 | 72 | ```bash 73 | helix-gpt --authCopilot 74 | ``` 75 | 76 | To fetch your Copilot token. 77 | 78 | ### Helix Configuration 79 | 80 | Example for TypeScript `.helix/languages.toml` tested with Helix 23.10 (older versions may not support multiple LSPs) 81 | 82 | ```toml 83 | [language-server.gpt] 84 | command = "helix-gpt" 85 | 86 | [language-server.ts] 87 | command = "typescript-language-server" 88 | args = ["--stdio"] 89 | language-id = "javascript" 90 | 91 | [[language]] 92 | name = "typescript" 93 | language-servers = [ 94 | "ts", 95 | "gpt" 96 | ] 97 | ``` 98 | 99 | In case you opt out of the precompiled binary, modify as follows: 100 | 101 | ```toml 102 | [language-server.gpt] 103 | command = "bun" 104 | args = ["run", "/app/helix-gpt.js"] 105 | ``` 106 | 107 | ### All Done 108 | 109 | If there are any issues, refer to the helix-gpt and Helix log files: 110 | 111 | ```bash 112 | tail -f /root/.cache/helix/helix.log 113 | tail -f /app/helix-gpt.log # Or wherever you set --logFile to 114 | ``` 115 | 116 | ### Special Thanks 117 | 118 | - [rsc1975](https://github.com/rsc1975/bun-docker) for their Bun Dockerfile. 119 | 120 | ### Todo 121 | 122 | - [x] Copilot support 123 | - [x] Resolve diagnostics code action 124 | - Self-hosted model support (partial support if they are openai compliant) 125 | - Inline completion provider (pending support from Helix) 126 | - Single config for all languages (pending [#9318](https://github.com/helix-editor/helix/pull/9318)) 127 | - Support workspace commands to toggle functionality (pending Helix support for merging workspace commands) 128 | - Increase test coverage 129 | - Async load completions to show other language server results immediately (pending Helix support) 130 | - Improve recovery from errors as it can leave the editor unusable sometimes 131 | -------------------------------------------------------------------------------- /assets/example.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/leona/helix-gpt/494daf644497e70ec636c74e5c81a3293fec8a86/assets/example.gif -------------------------------------------------------------------------------- /assets/example2.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/leona/helix-gpt/494daf644497e70ec636c74e5c81a3293fec8a86/assets/example2.gif -------------------------------------------------------------------------------- /assets/template.md: -------------------------------------------------------------------------------- 1 | # helix-gpt 2 | 3 | ![Build Status](https://github.com/leona/helix-gpt/actions/workflows/release.yml/badge.svg) 4 | ![Github Release](https://img.shields.io/badge/release-v-blue) 5 | 6 | Code assistant language server for [Helix](https://github.com/helix-editor/helix) with support for Copilot/OpenAI/Codeium. 
7 | 8 | Completion example 9 | 10 | ![helix-gpt example](https://github.com/leona/helix-gpt/raw/master/assets/example.gif) 11 | 12 | Code actions example (space + a) 13 | 14 | ![helix-gpt example](https://github.com/leona/helix-gpt/raw/master/assets/example2.gif) 15 | 16 | Available code actions: `resolveDiagnostics` `generateDocs` `improveCode` `refactorFromComment` `writeTest` 17 | 18 | ### How? 19 | 20 | When a trigger character is pressed, it will request a completion and use the entire file as context. 21 | Default trigger characters `["{", "(", " "]` can be overridden with `--triggerCharacters "{||(|| "` 22 | 23 | Use `ctrl+x` to manually trigger completions, and `space+a` to trigger code actions that only use the selected code as context. 24 | 25 | ### Install 26 | 27 | This was made to run with [Bun](https://bun.sh/), but you can also use a precompiled binary. 28 | 29 | #### Without Bun 30 | 31 | ```bash 32 | wget https://github.com/leona/helix-gpt/releases/download//helix-gpt--x86_64-linux.tar.gz \ 33 | -O /tmp/helix-gpt.tar.gz \ 34 | && tar -zxvf /tmp/helix-gpt.tar.gz \ 35 | && mv helix-gpt--x86_64-linux /usr/bin/helix-gpt \ 36 | && chmod +x /usr/bin/helix-gpt 37 | ``` 38 | 39 | #### With Bun (tested with 1.0.25) 40 | 41 | ```bash 42 | wget https://github.com/leona/helix-gpt/releases/download//helix-gpt-.js -O /usr/bin/helix-gpt 43 | ``` 44 | 45 | ### Configuration 46 | 47 | You can configure helix-gpt either by setting the environment variables below or by passing the command line options directly to helix-gpt in the Helix configuration step. 48 | 49 | [All configuration options](https://github.com/leona/helix-gpt/blob/master/src/config.ts) 50 | 51 | NOTE: Copilot is the best choice due to the model and implementation. 52 | 53 | #### Environment Variables 54 | 55 | ```bash 56 | OPENAI_API_KEY=123 # Required if using openai handler 57 | COPILOT_API_KEY=123 # Required if using copilot handler 58 | CODEIUM_API_KEY=123 # Not required, will use public API key otherwise. 59 | HANDLER=openai # openai/copilot/codeium 60 | ``` 61 | 62 | #### Command Line Arguments 63 | 64 | (Add to `command = "helix-gpt"` in Helix configuration) 65 | 66 | ```bash 67 | --handler openai --openaiKey 123 68 | ``` 69 | 70 | You can also use: 71 | 72 | ```bash 73 | helix-gpt --authCopilot 74 | ``` 75 | 76 | To fetch your Copilot token. 77 | 78 | ### Helix Configuration 79 | 80 | Example for TypeScript `.helix/languages.toml` tested with Helix 23.10 (older versions may not support multiple LSPs) 81 | 82 | ```toml 83 | [language-server.gpt] 84 | command = "helix-gpt" 85 | 86 | [language-server.ts] 87 | command = "typescript-language-server" 88 | args = ["--stdio"] 89 | language-id = "javascript" 90 | 91 | [[language]] 92 | name = "typescript" 93 | language-servers = [ 94 | "ts", 95 | "gpt" 96 | ] 97 | ``` 98 | 99 | In case you opt out of the precompiled binary, modify as follows: 100 | 101 | ```toml 102 | [language-server.gpt] 103 | command = "bun" 104 | args = ["run", "/app/helix-gpt.js"] 105 | ``` 106 | 107 | ### All Done 108 | 109 | If there are any issues, refer to the helix-gpt and Helix log files: 110 | 111 | ```bash 112 | tail -f /root/.cache/helix/helix.log 113 | tail -f /app/helix-gpt.log # Or wherever you set --logFile to 114 | ``` 115 | 116 | ### Special Thanks 117 | 118 | - [rsc1975](https://github.com/rsc1975/bun-docker) for their Bun Dockerfile.
119 | 120 | ### Todo 121 | 122 | - [x] Copilot support 123 | - [x] Resolve diagnostics code action 124 | - Self-hosted model support (partial support if they are openai compliant) 125 | - Inline completion provider (pending support from Helix) 126 | - Single config for all languages (pending [#9318](https://github.com/helix-editor/helix/pull/9318)) 127 | - Support workspace commands to toggle functionality (pending Helix support for merging workspace commands) 128 | - Increase test coverage 129 | - Async load completions to show other language server results immediately (pending Helix support) 130 | - Improve recovery from errors as it can leave the editor unusable sometimes 131 | -------------------------------------------------------------------------------- /bun.lockb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/leona/helix-gpt/494daf644497e70ec636c74e5c81a3293fec8a86/bun.lockb -------------------------------------------------------------------------------- /bunfig.toml: -------------------------------------------------------------------------------- 1 | smol = true 2 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.7" 2 | 3 | services: 4 | dev: 5 | container_name: helix-gpt_dev 6 | env_file: .env 7 | build: 8 | dockerfile: ./Dockerfile 9 | command: bash -c "sleep 999999s" 10 | restart: unless-stopped 11 | working_dir: /app 12 | ports: 13 | - 6499:6499 14 | logging: 15 | options: 16 | max-size: "10m" 17 | max-file: "3" 18 | volumes: 19 | - ./:/app 20 | build: 21 | container_name: helix-gpt_build 22 | build: 23 | dockerfile: ./Dockerfile 24 | command: bash -c "bun install; bun run build:smol; bun run build:bin" 25 | working_dir: /app 26 | logging: 27 | options: 28 | max-size: "10m" 29 | max-file: "3" 30 | volumes: 31 | - ./:/app 32 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "helix-gpt", 3 | "module": "src/app.ts", 4 | "type": "module", 5 | "scripts": { 6 | "dev": "bun --inspect=0.0.0.0:6499 run src/app.ts", 7 | "build:bin": "bun build ./src/app.ts --outfile=./dist/helix-gpt --compile --minify", 8 | "build:smol": "bun build ./src/app.ts --outfile=./dist/helix-gpt.js --minify --target bun; echo '#!/usr/bin/env bun' > ./dist/temp.js && cat ./dist/helix-gpt.js >> ./dist/temp.js && mv ./dist/temp.js ./dist/helix-gpt.js", 9 | "test": "TEST_RUNNER=true bun test" 10 | }, 11 | "devDependencies": { 12 | "@types/bun": "latest" 13 | }, 14 | "peerDependencies": { 15 | "typescript": "^5.0.0" 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /src/app.ts: -------------------------------------------------------------------------------- 1 | import Lsp from "./models/lsp" 2 | import { commands } from "./constants" 3 | import * as handlers from "./events" 4 | import { log } from "./utils" 5 | import config from "./config" 6 | import assistant from "./models/assistant" 7 | import copilotAuth from "./models/copilot-auth" 8 | import codeiumAuth from "./models/codeium-auth" 9 | import Github from "./providers/github" 10 | import Openai from "./providers/openai" 11 | import Codeium from "./providers/codeium" 12 | import Ollama from "./providers/ollama" 13 | 14 | if (config.authCopilot) { 15 | 
await copilotAuth() 16 | process.exit(0) 17 | } 18 | 19 | if (config.authCodeium) { 20 | await codeiumAuth() 21 | process.exit(0) 22 | } 23 | 24 | assistant.registerProvider("copilot", new Github()) 25 | assistant.registerProvider("openai", new Openai()) 26 | assistant.registerProvider("codeium", new Codeium()) 27 | assistant.registerProvider("ollama", new Ollama()) 28 | 29 | const lsp = new Lsp.Service({ 30 | capabilities: { 31 | codeActionProvider: true, 32 | executeCommandProvider: { 33 | commands: commands.map(i => i.key) 34 | }, 35 | completionProvider: { 36 | resolveProvider: false, 37 | triggerCharacters: config.triggerCharacters 38 | }, 39 | textDocumentSync: { 40 | change: 1, 41 | openClose: true 42 | } 43 | } 44 | }) 45 | 46 | lsp.registerEventHandlers(handlers) 47 | 48 | try { 49 | await lsp.start() 50 | } catch (e) { 51 | log("lsp-service error", e.message) 52 | } 53 | -------------------------------------------------------------------------------- /src/config.ts: -------------------------------------------------------------------------------- 1 | import { parseArgs } from "util"; 2 | import { context } from "./constants"; 3 | 4 | const { values } = parseArgs({ 5 | args: Bun.argv, 6 | options: { 7 | numSuggestions: { 8 | type: "string", 9 | default: "2", 10 | }, 11 | logFile: { 12 | type: "string", 13 | default: Bun.env.LOG_FILE ?? "", 14 | }, 15 | handler: { 16 | type: "string", 17 | default: Bun.env.HANDLER ?? "openai", 18 | }, 19 | debounce: { 20 | type: "string", 21 | default: Bun.env.DEBOUNCE ?? "200", 22 | }, 23 | triggerCharacters: { 24 | type: "string", 25 | default: Bun.env.TRIGGER_CHARACTERS ?? "{||(|| ", 26 | }, 27 | openaiKey: { 28 | type: "string", 29 | default: Bun.env.OPENAI_API_KEY, 30 | }, 31 | openaiContext: { 32 | type: "string", 33 | default: Bun.env.OPENAI_CONTEXT?.length 34 | ? Bun.env.OPENAI_CONTEXT 35 | : context.openai, 36 | }, 37 | openaiModel: { 38 | type: "string", 39 | default: Bun.env.OPENAI_MODEL ?? "gpt-3.5-turbo-16k", 40 | }, 41 | openaiMaxTokens: { 42 | type: "string", 43 | default: Bun.env.OPENAI_MAX_TOKENS ?? "7000", 44 | }, 45 | openaiEndpoint: { 46 | type: "string", 47 | default: Bun.env.OPENAI_ENDPOINT ?? "https://api.openai.com/v1", 48 | }, 49 | copilotEndpoint: { 50 | type: "string", 51 | default: Bun.env.GITHUB_ENDPOINT ?? "https://api.githubcopilot.com", 52 | }, 53 | copilotContext: { 54 | type: "string", 55 | default: Bun.env.COPILOT_CONTEXT?.length 56 | ? Bun.env.COPILOT_CONTEXT 57 | : context.copilot, 58 | }, 59 | copilotModel: { 60 | type: "string", 61 | default: Bun.env.COPILOT_MODEL ?? "gpt-3.5-turbo", 62 | }, 63 | copilotApiKey: { 64 | type: "string", 65 | default: Bun.env.COPILOT_API_KEY, 66 | }, 67 | authCopilot: { 68 | type: "boolean", 69 | default: false, 70 | }, 71 | authCodeium: { 72 | type: "boolean", 73 | default: false, 74 | }, 75 | codeiumApiKey: { 76 | type: "string", 77 | default: 78 | Bun.env.CODEIUM_API_KEY ?? "d49954eb-cfba-4992-980f-d8fb37f0e942", // Public Codeium key 79 | }, 80 | fetchTimeout: { 81 | type: "string", 82 | default: Bun.env.FETCH_TIMEOUT ?? "10000", 83 | }, 84 | actionTimeout: { 85 | type: "string", 86 | default: Bun.env.ACTION_TIMEOUT ?? "10000", 87 | }, 88 | completionTimeout: { 89 | type: "string", 90 | default: Bun.env.COMPLETION_TIMEOUT ?? "10000", 91 | }, 92 | ollamaEndpoint: { 93 | type: "string", 94 | // use 127.0.0.1 instead of localhost for issue with bun 95 | // see: https://github.com/oven-sh/bun/issues/1425 96 | default: Bun.env.OLLAMA_ENDPOINT ?? 
"http://127.0.0.1:11434", 97 | }, 98 | ollamaModel: { 99 | type: "string", 100 | default: Bun.env.OLLAMA_MODEL ?? "codellama", 101 | }, 102 | ollamaContext: { 103 | type: "string", 104 | default: Bun.env.OLLAMA_CONTEXT?.length 105 | ? Bun.env.OLLAMA_CONTEXT 106 | : context.ollama, 107 | }, 108 | ollamaTimeout: { 109 | type: "string", 110 | default: Bun.env.OLLAMA_TIMEOUT ?? "60000", 111 | }, 112 | }, 113 | strict: true, 114 | allowPositionals: true, 115 | }); 116 | 117 | if ( 118 | !Bun.env.TEST_RUNNER?.length && 119 | !values.openaiKey?.length && 120 | !values.copilotApiKey?.length && 121 | !values.authCopilot && 122 | !values.authCodeium && 123 | values.handler !== "codeium" && 124 | values.handler !== "ollama" 125 | ) { 126 | throw new Error("no handler key provided"); 127 | } 128 | 129 | export default { 130 | ...values, 131 | triggerCharacters: (values.triggerCharacters as string).split("||"), 132 | debounce: parseInt(values.debounce as string), 133 | fetchTimeout: parseInt(values.fetchTimeout as string), 134 | actionTimeout: parseInt(values.actionTimeout as string), 135 | completionTimeout: parseInt(values.completionTimeout as string), 136 | numSuggestions: parseInt(values.numSuggestions as string), 137 | }; 138 | -------------------------------------------------------------------------------- /src/constants.ts: -------------------------------------------------------------------------------- 1 | export const context = { 2 | openai: `Continue the input code from the language . Only respond with code.`, 3 | copilot: ` completions. Only respond with code.`, 4 | ollama: `Continue the input code from the language . Only respond with code.` 5 | } 6 | 7 | export const examples = [ 8 | { 9 | role: "user", 10 | content: `function randomInt(` 11 | }, 12 | { 13 | role: "assistant", 14 | content: `min: number, max: number): number { 15 | min = Math.ceil(min); 16 | max = Math.floor(max); 17 | return Math.floor(Math.random() * (max - min + 1)) + min; 18 | }` 19 | } 20 | ] 21 | 22 | export const commands = [ 23 | { 24 | key: "resolveDiagnostics", 25 | label: "Resolve diagnostics", 26 | query: "Resolve the diagnostics for this code." 27 | }, 28 | { 29 | key: "generateDocs", 30 | label: "Generate documentation", 31 | query: "Add documentation to this code." 32 | }, 33 | { 34 | key: "improveCode", 35 | label: "Improve code", 36 | query: "Improve this code." 37 | }, 38 | { 39 | key: "refactorFromComment", 40 | label: "Refactor code from a comment", 41 | query: "Refactor this code based on the comment." 42 | }, 43 | { 44 | key: "writeTest", 45 | label: "Write a unit test", 46 | query: "Write a unit test for this code. 
Do not include any imports.", 47 | } 48 | ] 49 | -------------------------------------------------------------------------------- /src/events/actions.ts: -------------------------------------------------------------------------------- 1 | import { Service } from "../models/lsp" 2 | import { Event, DiagnosticSeverity } from "../models/lsp.types" 3 | import { commands } from "../constants" 4 | import assistant from "../models/assistant" 5 | import { log } from "../utils" 6 | import config from "../config"; 7 | 8 | export const actions = (lsp: Service) => { 9 | lsp.on(Event.ExecuteCommand, async ({ ctx, request }) => { 10 | const { command } = request.params 11 | const { diagnostics, range } = request.params.arguments[0] 12 | let { query } = request.params.arguments[0] 13 | 14 | ctx.sendDiagnostics([ 15 | { 16 | message: `Executing ${command}...`, 17 | range, 18 | severity: DiagnosticSeverity.Information 19 | } 20 | ], config.actionTimeout) 21 | 22 | const content = ctx.getContentFromRange(range) 23 | const padding = ctx.getContentPadding(content) 24 | const buffer = ctx.buffers[ctx.currentUri] 25 | log("chat request content:", content) 26 | 27 | if (diagnostics?.length) { 28 | query += "\n\nDiagnostics: " + diagnostics.join("\n- ") 29 | } 30 | 31 | try { 32 | var { result } = await assistant.chat(query, content, ctx.currentUri as string, buffer?.languageId as string) 33 | 34 | if (!result?.length) { 35 | throw new Error("No completion found") 36 | } 37 | } catch (e) { 38 | log("chat failed", e.message) 39 | 40 | return ctx.sendDiagnostics([{ 41 | message: e.message, 42 | severity: DiagnosticSeverity.Error, 43 | range 44 | }], config.actionTimeout) 45 | } 46 | 47 | result = ctx.padContent(result.trim(), padding) + "\n" 48 | log("received chat result:", result) 49 | 50 | ctx.send({ 51 | method: Event.ApplyEdit, 52 | id: request.id, 53 | params: { 54 | label: command, 55 | edit: { 56 | changes: { 57 | [ctx.currentUri as string]: [{ 58 | range, 59 | newText: result 60 | }] 61 | } 62 | } 63 | } 64 | }) 65 | 66 | ctx.resetDiagnostics() 67 | }) 68 | 69 | lsp.on(Event.CodeAction, ({ ctx, request }) => { 70 | ctx.currentUri = request.params.textDocument.uri 71 | 72 | ctx.send({ 73 | id: request.id, 74 | result: commands.map(i => ({ 75 | title: i.label, 76 | kind: "quickfix", 77 | diagnostics: [], 78 | command: { 79 | title: i.label, 80 | command: i.key, 81 | arguments: [{ 82 | range: request.params.range, 83 | query: i.query, 84 | diagnostics: request.params.context?.diagnostics?.map(i => i.message) 85 | }] 86 | } 87 | })) 88 | }) 89 | }) 90 | } 91 | -------------------------------------------------------------------------------- /src/events/completions.ts: -------------------------------------------------------------------------------- 1 | import { Service } from "../models/lsp"; 2 | import { Event, DiagnosticSeverity } from "../models/lsp.types"; 3 | import { debounce, log, getContent } from "../utils"; 4 | import assistant from "../models/assistant"; 5 | import config from "../config"; 6 | 7 | export const completions = (lsp: Service) => { 8 | lsp.on(Event.Completion, async ({ ctx, request }) => { 9 | const buffer = ctx.buffers[request.params.textDocument.uri]; 10 | const lastContentVersion = buffer.version; 11 | const { lastCharacter } = await getContent( 12 | buffer.text, 13 | request.params.position.line, 14 | request.params.position.character, 15 | ); 16 | 17 | if (lastCharacter == ".") { 18 | return ctx.send({ 19 | id: request.id, 20 | result: { 21 | isIncomplete: false, 22 | items: [], 23 | 
}, 24 | }); 25 | } 26 | 27 | debounce( 28 | "completion", 29 | async () => { 30 | try { 31 | await completion({ ctx, request, lastContentVersion }); 32 | } catch (e) { 33 | log("error in completion event", e.message); 34 | ctx.sendDiagnostics( 35 | [ 36 | { 37 | message: e.message, 38 | severity: DiagnosticSeverity.Error, 39 | range: { 40 | start: { line: request.params.position.line, character: 0 }, 41 | end: { line: request.params.position.line + 1, character: 0 }, 42 | }, 43 | }, 44 | ], 45 | config.completionTimeout, 46 | ); 47 | } 48 | }, 49 | config.debounce, 50 | ); 51 | }); 52 | 53 | const completion = async ({ ctx, request, lastContentVersion }) => { 54 | const skip = () => { 55 | ctx.resetDiagnostics(); 56 | 57 | ctx.send({ 58 | id: request.id, 59 | result: { 60 | isIncomplete: false, 61 | items: [], 62 | }, 63 | }); 64 | }; 65 | 66 | const buffer = ctx.buffers[request.params.textDocument.uri]; 67 | log("running completion on buffer", JSON.stringify(buffer)); 68 | 69 | if (buffer.version > lastContentVersion) { 70 | log("skipping because content is stale"); 71 | return skip(); 72 | } 73 | 74 | const { lastLine, contentBefore, contentAfter, contentImmediatelyAfter } = 75 | await getContent( 76 | buffer.text, 77 | request.params.position.line, 78 | request.params.position.character, 79 | ); 80 | log("calling completion event"); 81 | 82 | ctx.sendDiagnostics( 83 | [ 84 | { 85 | message: "Fetching completion...", 86 | severity: DiagnosticSeverity.Information, 87 | range: { 88 | start: { line: request.params.position.line, character: 0 }, 89 | end: { line: request.params.position.line + 1, character: 0 }, 90 | }, 91 | }, 92 | ], 93 | config.completionTimeout, 94 | ); 95 | 96 | try { 97 | var hints = await assistant.completion( 98 | { contentBefore, contentAfter }, 99 | ctx.currentUri, 100 | buffer?.languageId, 101 | ); 102 | } catch (e) { 103 | return ctx.sendDiagnostics( 104 | [ 105 | { 106 | message: e.message, 107 | severity: DiagnosticSeverity.Error, 108 | range: { 109 | start: { line: request.params.position.line, character: 0 }, 110 | end: { line: request.params.position.line + 1, character: 0 }, 111 | }, 112 | }, 113 | ], 114 | config.completionTimeout, 115 | ); 116 | } 117 | 118 | log("completion hints:", hints); 119 | 120 | const items = hints?.map((i: string) => { 121 | i = i.trim(); 122 | if (i.startsWith(lastLine.trim())) { 123 | i = i.slice(lastLine.trim().length).trim(); 124 | } 125 | 126 | const lines = i.split("\n"); 127 | const cleanLine = request.params.position.line + lines.length - 1; 128 | let cleanCharacter = lines.slice(-1)[0].length; 129 | 130 | if (cleanLine == request.params.position.line) { 131 | cleanCharacter += request.params.position.character; 132 | } 133 | 134 | return { 135 | label: lines[0].length > 20 ? 
lines[0] : i.slice(0, 20).trim(), 136 | kind: 1, 137 | preselect: true, 138 | detail: i, 139 | insertText: i, 140 | insertTextFormat: 1, 141 | additionalTextEdits: [ 142 | { 143 | newText: "", 144 | range: { 145 | start: { line: cleanLine, character: cleanCharacter }, 146 | end: { 147 | line: cleanLine, 148 | character: cleanCharacter + contentImmediatelyAfter?.length, 149 | }, 150 | }, 151 | }, 152 | ], 153 | }; 154 | }); 155 | 156 | ctx.send({ 157 | id: request.id, 158 | result: { 159 | isIncomplete: false, 160 | items, 161 | }, 162 | }); 163 | 164 | ctx.resetDiagnostics(); 165 | }; 166 | }; 167 | -------------------------------------------------------------------------------- /src/events/index.ts: -------------------------------------------------------------------------------- 1 | export * from "./actions" 2 | export * from "./completions" 3 | -------------------------------------------------------------------------------- /src/models/api.ts: -------------------------------------------------------------------------------- 1 | import { log } from "../utils" 2 | import config from "../config"; 3 | 4 | interface Request { 5 | endpoint: string; 6 | method: "GET" | "POST" | "PUT" | "DELETE"; 7 | body?: any; 8 | headers?: Record; 9 | params?: Record; 10 | url?: string; 11 | text?: boolean; 12 | timeout?: number 13 | } 14 | 15 | interface ApiBaseOptions { 16 | url: string; 17 | headers?: Record; 18 | params?: Record; 19 | } 20 | 21 | type RequestInitTimeout = RequestInit & { timeout?: number }; 22 | 23 | export default class ApiBase { 24 | private url: string; 25 | private headers: Record; 26 | private params: Record; 27 | private controller: AbortController; 28 | 29 | constructor({ url, headers, params }: ApiBaseOptions) { 30 | this.url = url; 31 | this.headers = headers || {}; 32 | this.params = params || {}; 33 | this.controller = new AbortController(); 34 | } 35 | 36 | async fetch( 37 | url: string, 38 | options: RequestInitTimeout, 39 | timeout: number = config.fetchTimeout): Promise { 40 | return new Promise(async (resolve, reject) => { 41 | setTimeout(() => { 42 | this.controller.abort() 43 | reject(new Error("timeout")) 44 | }, timeout); 45 | 46 | try { 47 | const response = await fetch(url, options); 48 | resolve(response); 49 | } catch (error: any) { 50 | // we canceled the request on purpose beasue there is a new one, so no need to tell the user 51 | if (error.name === "AbortError") reject({ message: "" }); 52 | reject(error); 53 | } 54 | }); 55 | } 56 | 57 | async request(request: Request): Promise { 58 | const { endpoint, method, body, headers, params, url, timeout } = request; 59 | let requestUrl = new URL(endpoint, url || this.url); 60 | log("fetch", endpoint) 61 | 62 | if (params) { 63 | Object.keys(params).forEach((key) => 64 | requestUrl.searchParams.append(key, params[key]) 65 | ); 66 | } 67 | 68 | Object.keys(this.params).forEach((key) => { 69 | requestUrl.searchParams.append(key, this.params[key]); 70 | }); 71 | 72 | // cancel last pending request 73 | this.controller.abort(); 74 | this.controller = new AbortController(); 75 | 76 | let opts = { 77 | headers: { 78 | ...this.headers, 79 | ...headers, 80 | }, 81 | method, 82 | body: null as any, 83 | signal: this.controller.signal 84 | }; 85 | 86 | if (body) { 87 | opts.body = JSON.stringify(body); 88 | } 89 | 90 | const response = await this.fetch(requestUrl.toString(), opts, timeout); 91 | 92 | if (!response.ok) { 93 | let error = await response.text(); 94 | throw new Error( 95 | `Fetch failed with status ${response.status} 
body ${error} url: ${request.endpoint}` 96 | ); 97 | } 98 | 99 | log("response", requestUrl, response.status) 100 | 101 | if (request.text) { 102 | return await response.text(); 103 | } 104 | 105 | return await response.json(); 106 | } 107 | } 108 | -------------------------------------------------------------------------------- /src/models/assistant.ts: -------------------------------------------------------------------------------- 1 | import config from "../config" 2 | import { log } from "../utils" 3 | 4 | interface Provider { 5 | chat?(request: string, contents: any, filepath: string, language: string): Promise 6 | completion?(contents: any, filepath: string, language: string): Promise 7 | } 8 | 9 | const providers: Record = {} 10 | 11 | const registerProvider = (key: string, provider: Provider) => { 12 | providers[key] = provider 13 | } 14 | 15 | const getProvider = (key: string): Provider => { 16 | if (!providers[config.handler]) { 17 | const error = `no provider: ${config.handler}` 18 | log(error) 19 | throw new Error(error) 20 | } 21 | 22 | return providers[config.handler] 23 | } 24 | 25 | const chat = async (...args: any[]) => { 26 | log(config.handler, "chat request", JSON.stringify(args)) 27 | const provider = getProvider(config.handler) 28 | 29 | if (!provider.chat) { 30 | const error = `No chat provider for: ${config.handler}` 31 | log(error) 32 | throw new Error(error) 33 | } 34 | 35 | return provider.chat(...args) 36 | } 37 | 38 | const completion = async (...args: any[]) => { 39 | log(config.handler, "completion request") 40 | const provider = getProvider(config.handler) 41 | 42 | if (!provider.completion) { 43 | const error = `No completion provider for: ${config.handler}` 44 | log(error) 45 | throw new Error(error) 46 | } 47 | 48 | return provider.completion(...args) 49 | } 50 | 51 | export default { chat, completion, registerProvider } 52 | -------------------------------------------------------------------------------- /src/models/codeium-auth.ts: -------------------------------------------------------------------------------- 1 | import Codeium from "../providers/codeium"; 2 | 3 | export default async () => { 4 | const codeium = new Codeium(); 5 | const authUrl = codeium.authUrl() 6 | console.log(`Visit the following URL and enter the token below: ${authUrl}`) 7 | const input = prompt("Token: ") 8 | const apiKey = await codeium.register(input?.trim() as string) 9 | console.log(`\nCodeium API key: ${apiKey}`) 10 | } 11 | -------------------------------------------------------------------------------- /src/models/copilot-auth.ts: -------------------------------------------------------------------------------- 1 | import Github from "../providers/github" 2 | 3 | export default async () => { 4 | const github = new Github() 5 | 6 | const deviceCode = await github.deviceCode() 7 | console.log(`Visit: ${deviceCode.verificationUri} in your browser and enter: ${deviceCode.userCode}`) 8 | 9 | while (true) { 10 | await new Promise(resolve => setTimeout(resolve, 5000)) 11 | const auth = await github.accessToken(deviceCode.deviceCode) 12 | 13 | if (auth?.accessToken?.length) { 14 | console.log("\n\nGot token:", auth.accessToken, "\n\n", "Store this in the COPILOT_API_KEY environment variable") 15 | break 16 | } 17 | 18 | console.log("Waiting for user authorization...") 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /src/models/lsp.test.ts: -------------------------------------------------------------------------------- 1 | 
import { expect, test } from "bun:test"; 2 | import Lsp from "./lsp" 3 | 4 | test("parseLine", async () => { 5 | const lsp = new Lsp.Service({}) 6 | 7 | const content1 = `Content-Length: 154\r\n{"id": 1}\r\nContent-Length: 200\n` 8 | expect(lsp.parseLine(content1)).toEqual({ id: 1 }) 9 | 10 | const content2 = `Content-Length: 1766\r\n{"jsonrpc":"2.0","method":"initialize","params":{"capabilities":{"general":{"positionEncodings":["utf-8","utf-32","utf-16"]},"textDocument":{"codeAction":{"codeActionLiteralSupport":{"codeActionKind":{"valueSet":["","quickfix","refactor","refactor.extract","refactor.inline","refactor.rewrite","source","source.organizeImports"]}},"dataSupport":true,"disabledSupport":true,"isPreferredSupport":true,"resolveSupport":{"properties":["edit","command"]}},"completion":{"completionItem":{"deprecatedSupport":true,"insertReplaceSupport":true,"resolveSupport":{"properties":["documentation","detail","additionalTextEdits"]},"snippetSupport":true,"tagSupport":{"valueSet":[1]}},"completionItemKind":{}},"hover":{"contentFormat":["markdown"]},"inlayHint":{"dynamicRegistration":false},"publishDiagnostics":{"versionSupport":true},"rename":{"dynamicRegistration":false,"honorsChangeAnnotations":false,"prepareSupport":true},"signatureHelp":{"signatureInformation":{"activeParameterSupport":true,"documentationFormat":["markdown"],"parameterInformation":{"labelOffsetSupport":true}}}},"window":{"workDoneProgress":true},"workspace":{"applyEdit":true,"configuration":true,"didChangeConfiguration":{"dynamicRegistration":false},"didChangeWatchedFiles":{"dynamicRegistration":true,"relativePatternSupport":false},"executeCommand":{"dynamicRegistration":false},"inlayHint":{"refreshSupport":false},"symbol":{"dynamicRegistration":false},"workspaceEdit":{"documentChanges":true,"failureHandling":"abort","normalizesLineEndings":false,"resourceOperations":["create","rename","delete"]},"workspaceFolders":true}},"clientInfo":{"name":"helix","version":"23.10"},"processId":4557,"rootPath":"/app","rootUri":"file:///app","workspaceFolders":[{"name":"app","uri":"file:///app"}]},"id":0}` 11 | expect(lsp.parseLine(content2)).toMatchObject({ method: "initialize" }) 12 | 13 | const content3 = `{"id": 1}\r\nContent-Length: 200\n` 14 | expect(lsp.parseLine(content3)).toEqual({ id: 1 }) 15 | 16 | const content4 = ` registerEventHandlers(handlers: Record void>) { 17 | Object.values(handlers).forEach((i: (lsp: IService) => void) => { 18 | i(this) 19 | }) 20 | } 21 | ` 22 | const padding = lsp.getContentPadding(content4) 23 | expect(padding).toEqual(2) 24 | 25 | const content5 = `const example = 1` 26 | const padding2 = lsp.getContentPadding(content5) 27 | expect(padding2).toEqual(0) 28 | }) 29 | -------------------------------------------------------------------------------- /src/models/lsp.ts: -------------------------------------------------------------------------------- 1 | import EventEmitter from "node:events" 2 | import { log } from "../utils" 3 | import type { Buffer, Range, Diagnostic, EventRequest } from "./lsp.types" 4 | import { Event, DiagnosticSeverity } from "./lsp.types" 5 | 6 | export class Service { 7 | emitter: EventEmitter 8 | capabilities: any 9 | currentUri?: string 10 | buffers: Record 11 | 12 | constructor({ capabilities }: { capabilities: any }) { 13 | this.emitter = new EventEmitter({ captureRejections: true }) 14 | this.capabilities = capabilities 15 | this.buffers = {} 16 | log("triggerCharacters:", JSON.stringify(capabilities?.completionProvider?.triggerCharacters)) 17 | 
this.registerDefault() 18 | } 19 | 20 | registerDefault() { 21 | this.emitter.on("error", (e) => { 22 | log("lsp-event-emitter error", e.message) 23 | }) 24 | 25 | this.on(Event.Initialize, async ({ ctx, request }) => { 26 | ctx.send({ 27 | id: request.id, 28 | result: { 29 | capabilities: this.capabilities 30 | } 31 | }) 32 | }) 33 | 34 | this.on(Event.DidOpen, ({ ctx, request }) => { 35 | const { uri, text, languageId } = request.params.textDocument 36 | 37 | this.buffers[uri] = { 38 | uri, text, languageId, version: 0 39 | } 40 | 41 | this.currentUri = uri 42 | log("received didOpen", `language: ${languageId}`) 43 | }) 44 | 45 | this.on(Event.Shutdown, () => { 46 | log("received shutdown request") 47 | process.exit(0) 48 | }) 49 | 50 | this.on(Event.DidChange, async ({ ctx, request }) => { 51 | const { uri, version } = request.params.textDocument 52 | this.buffers[uri] = { ...this.buffers[uri], version, text: request.params.contentChanges[0].text } 53 | this.currentUri = uri 54 | log("received didChange", `language: ${this.buffers[uri].languageId}`, `contentVersion: ${version}`, `uri: ${uri}`) 55 | }) 56 | } 57 | 58 | getContentPadding(text: string): number { 59 | const lines = text.split("\n") 60 | 61 | const smallestPadding = lines.reduce((acc, line) => { 62 | const trimmedLine = line.trim(); 63 | if (trimmedLine.length === 0) return acc; 64 | const padding = line.match(/^\s+/)?.[0].length || 0; 65 | return Math.min(padding, acc); 66 | }, 99999); 67 | 68 | return smallestPadding 69 | } 70 | 71 | padContent(text: string, padding: number): string { 72 | return text.split("\n").map((line) => { 73 | if (line.trim().length === 0) return line 74 | return " ".repeat(padding) + line 75 | }).join("\n") 76 | } 77 | 78 | registerEventHandlers(handlers: Record void>) { 79 | Object.values(handlers).forEach((i: (lsp: Service) => void) => { 80 | i(this) 81 | }) 82 | } 83 | 84 | getContentFromRange(range: Range): string { 85 | log("getting content from range", JSON.stringify(range), `uri: ${this.currentUri}`, `current buffers: ${JSON.stringify(Object.keys(this.buffers))}`) 86 | const { start, end } = range 87 | return this.buffers[this.currentUri]?.text?.split("\n")?.slice(start.line, end.line).join("\n") 88 | } 89 | 90 | positionalUpdate(uri: string, text: string, range: Range) { 91 | const buffer = this.buffers[uri] 92 | const lines = buffer?.text?.split("\n") 93 | const start = range.start.line 94 | const end = range.end.line 95 | const startLine = lines[start] 96 | const endLine = lines[end] 97 | const startLineStart = startLine?.substring(0, range.start.character) 98 | const endLineEnd = endLine?.substring(range.end.character) 99 | const newLines = [startLineStart + text + endLineEnd] 100 | 101 | const newContents = lines.reduce((acc, line, index) => { 102 | if (index < start || index > end) { 103 | acc.push(line) 104 | } else if (index === start) { 105 | acc.push(newLines[0]) 106 | } 107 | return acc 108 | }, []) 109 | 110 | this.buffers[uri].text = newContents.join("\n") 111 | } 112 | 113 | on(event: string, callback: (request: EventRequest) => Promise | undefined) { 114 | this.emitter.on(event, async (request) => { 115 | try { 116 | await callback({ ctx: this, request }) 117 | } catch (e) { 118 | log("error in event", JSON.stringify(request), e.message) 119 | } 120 | }) 121 | } 122 | 123 | send({ method, id, result, params }: { method?: Event, id?: number, result?: any, params?: any }) { 124 | const request = JSON.stringify({ 125 | jsonrpc: "2.0", 126 | method, 127 | id, 128 | result, 129 | 
params 130 | }) 131 | 132 | const len = (new TextEncoder()).encode(request).length 133 | process.stdout.write(`Content-Length: ${len}\r\n\r\n${request}`) 134 | log("sent request", request) 135 | } 136 | 137 | sendDiagnostics(diagnostics: Diagnostic[], timeout: number = 0) { 138 | log("sending diagnostics", JSON.stringify(diagnostics)) 139 | 140 | const params = { 141 | uri: this.currentUri, 142 | diagnostics: diagnostics.map((i) => { 143 | i.source = "helix-gpt" 144 | return i 145 | }) 146 | } 147 | 148 | this.send({ 149 | method: Event.PublishDiagnostics, 150 | params 151 | }) 152 | 153 | if (timeout > 0) { 154 | setTimeout(() => { 155 | this.send({ 156 | method: Event.PublishDiagnostics, 157 | params: { 158 | uri: this.currentUri, 159 | diagnostics: [] 160 | } 161 | }) 162 | }, timeout) 163 | } 164 | } 165 | 166 | resetDiagnostics() { 167 | this.send({ 168 | method: Event.PublishDiagnostics, 169 | params: { 170 | uri: this.currentUri, 171 | diagnostics: [] 172 | } 173 | }) 174 | } 175 | 176 | parseLine(line: string) { 177 | const components = line.split('\r\n') 178 | 179 | for (const data of components) { 180 | try { 181 | return JSON.parse(data) 182 | } catch (e) { } 183 | } 184 | 185 | throw new Error("failed to parse") 186 | } 187 | 188 | async receiveLine(line: string) { 189 | try { 190 | const request = this.parseLine(line) 191 | 192 | if (![Event.DidChange, Event.DidOpen].includes(request.method)) { 193 | log("received request:", JSON.stringify(request)) 194 | } 195 | 196 | this.emitter.emit(request.method, request) 197 | } catch (e) { 198 | log("failed to parse line:", e.message, line) 199 | } 200 | } 201 | 202 | 203 | async start() { 204 | for await (const chunk of Bun.stdin.stream()) { 205 | const chunkText = Buffer.from(chunk).toString(); 206 | this.receiveLine(chunkText) 207 | } 208 | } 209 | } 210 | 211 | export default { 212 | Service, Event, DiagnosticSeverity 213 | } 214 | -------------------------------------------------------------------------------- /src/models/lsp.types.ts: -------------------------------------------------------------------------------- 1 | import EventEmitter from "node:events" 2 | import { Service } from "./lsp" 3 | 4 | export enum Event { 5 | DidOpen = "textDocument/didOpen", 6 | DidChange = "textDocument/didChange", 7 | Completion = "textDocument/completion", 8 | CodeAction = "textDocument/codeAction", 9 | ApplyEdit = "workspace/applyEdit", 10 | ExecuteCommand = "workspace/executeCommand", 11 | Initialize = "initialize", 12 | Shutdown = "shutdown", 13 | Exit = "exit", 14 | PublishDiagnostics = "textDocument/publishDiagnostics", 15 | } 16 | 17 | export enum DiagnosticSeverity { 18 | Error = 1, 19 | Warning = 2, 20 | Information = 3, 21 | Hint = 4, 22 | } 23 | 24 | export type Position = { 25 | line: number, 26 | character: number 27 | } 28 | 29 | export type Range = { 30 | start: Position, 31 | end: Position 32 | } 33 | 34 | export type Diagnostic = { 35 | message: string, 36 | range: Range, 37 | source?: string, 38 | severity?: DiagnosticSeverity 39 | } 40 | 41 | export type EventRequest = { 42 | ctx: Service 43 | request: any, 44 | } 45 | 46 | export type Buffer = { 47 | text: string, 48 | version: number, 49 | languageId: string, 50 | uri: string, 51 | } 52 | 53 | -------------------------------------------------------------------------------- /src/providers/codeium.test.ts: -------------------------------------------------------------------------------- 1 | import { expect, test } from "bun:test"; 2 | import Codeium from "./codeium" 3 | 4 | 
const codeium = new Codeium() 5 | 6 | test("completion", async () => { 7 | (() => 8 | Promise.resolve({ 9 | json: () => Promise.resolve({ 10 | "state": { 11 | "state": "CODEIUM_STATE_SUCCESS", 12 | "message": "Generated 6 completions" 13 | }, 14 | "completionItems": [ 15 | { 16 | "completion": { 17 | "completionId": "4872d12f-1931-4f5d-9848-a2feee99da2e", 18 | "text": "const alphabet = 'abcdefghijklmnopqrstuvwxyz'", 19 | "stop": "\n\n", 20 | "score": -1.3279907496119012, 21 | "tokens": [ 22 | "6", 23 | "68612", 24 | "3961" 25 | ], 26 | "decodedTokens": [ 27 | "'", 28 | "abcdefghijklmnopqrstuvwxyz", 29 | "'\n\n" 30 | ], 31 | "probabilities": [ 32 | 0.2410283237695694, 33 | 0.634307324886322, 34 | 0.21950317919254303 35 | ], 36 | "adjustedProbabilities": [ 37 | 0.17243333160877228, 38 | 0.9864425659179688, 39 | 0.15022589266300201 40 | ], 41 | "generatedLength": "3", 42 | "stopReason": "STOP_REASON_STOP_PATTERN", 43 | "originalText": "'abcdefghijklmnopqrstuvwxyz'\n\n" 44 | }, 45 | "range": { 46 | "endOffset": "17", 47 | "startPosition": {}, 48 | "endPosition": { 49 | "col": "17" 50 | } 51 | }, 52 | "source": "COMPLETION_SOURCE_NETWORK", 53 | "completionParts": [ 54 | { 55 | "text": "'abcdefghijklmnopqrstuvwxyz'", 56 | "offset": "17", 57 | "type": "COMPLETION_PART_TYPE_INLINE", 58 | "prefix": "const alphabet = " 59 | }, 60 | { 61 | "text": "'abcdefghijklmnopqrstuvwxyz'", 62 | "offset": "17", 63 | "type": "COMPLETION_PART_TYPE_INLINE_MASK", 64 | "prefix": "const alphabet = " 65 | } 66 | ] 67 | }, 68 | { 69 | "completion": { 70 | "completionId": "4eab8e74-ecdb-461b-9932-b08e172b4426", 71 | "text": "const alphabet = \"abcdefghijklmnopqrstuvwxyz\"\nconst numbers = \"0123456789\"", 72 | "stop": "<|endofmiddle|>", 73 | "score": -1.7445913846352432, 74 | "tokens": [ 75 | "1", 76 | "68612", 77 | "702", 78 | "1040", 79 | "5219", 80 | "284", 81 | "330", 82 | "11531", 83 | "12901", 84 | "17458", 85 | "24", 86 | "702", 87 | "100299" 88 | ], 89 | "decodedTokens": [ 90 | "\"", 91 | "abcdefghijklmnopqrstuvwxyz", 92 | "\"\n", 93 | "const", 94 | " numbers", 95 | " =", 96 | " \"", 97 | "012", 98 | "345", 99 | "678", 100 | "9", 101 | "\"\n", 102 | "<|endofmiddle|>" 103 | ], 104 | "probabilities": [ 105 | 0.4467959403991699, 106 | 0.5885083675384521, 107 | 0.43643850088119507, 108 | 0.7753089666366577, 109 | 0.13237497210502625, 110 | 0.9959436058998108, 111 | 0.5902878642082214, 112 | 0.6605963706970215, 113 | 0.9968718886375427, 114 | 0.99399334192276, 115 | 0.9554901123046875, 116 | 0.5480461716651917, 117 | 0.06993699073791504 118 | ], 119 | "adjustedProbabilities": [ 120 | 0.8067222237586975, 121 | 0.9805918335914612, 122 | 0.828681468963623, 123 | 0.998873770236969, 124 | 0.16544978320598602, 125 | 1, 126 | 0.8822909593582153, 127 | 0.8479676246643066, 128 | 0.9999998807907104, 129 | 0.9999998807907104, 130 | 0.9998165965080261, 131 | 0.7556309103965759, 132 | 1 133 | ], 134 | "generatedLength": "13", 135 | "stopReason": "STOP_REASON_STOP_PATTERN", 136 | "originalText": "\"abcdefghijklmnopqrstuvwxyz\"\nconst numbers = \"0123456789\"\n<|endofmiddle|>" 137 | }, 138 | "range": { 139 | "endOffset": "17", 140 | "startPosition": {}, 141 | "endPosition": { 142 | "col": "17" 143 | } 144 | }, 145 | "source": "COMPLETION_SOURCE_NETWORK", 146 | "completionParts": [ 147 | { 148 | "text": "\"abcdefghijklmnopqrstuvwxyz\"", 149 | "offset": "17", 150 | "type": "COMPLETION_PART_TYPE_INLINE", 151 | "prefix": "const alphabet = " 152 | }, 153 | { 154 | "text": "\"abcdefghijklmnopqrstuvwxyz\"", 155 | "offset": "17", 156 | 
"type": "COMPLETION_PART_TYPE_INLINE_MASK", 157 | "prefix": "const alphabet = " 158 | }, 159 | { 160 | "text": "const numbers = \"0123456789\"", 161 | "offset": "17", 162 | "type": "COMPLETION_PART_TYPE_BLOCK" 163 | } 164 | ] 165 | }, 166 | { 167 | "completion": { 168 | "completionId": "27b2a63d-3b06-472b-ae08-62711cffc6e1", 169 | "text": "const alphabet = \"abcdefghijklmnopqrstuvwxyz\"\nconst number = 123", 170 | "stop": "<|endofmiddle|>", 171 | "score": -1.8225764230508985, 172 | "tokens": [ 173 | "1", 174 | "68612", 175 | "702", 176 | "1040", 177 | "1396", 178 | "284", 179 | "220", 180 | "4513", 181 | "100299" 182 | ], 183 | "decodedTokens": [ 184 | "\"", 185 | "abcdefghijklmnopqrstuvwxyz", 186 | "\"\n", 187 | "const", 188 | " number", 189 | " =", 190 | " ", 191 | "123", 192 | "<|endofmiddle|>" 193 | ], 194 | "probabilities": [ 195 | 0.4467959403991699, 196 | 0.5885083675384521, 197 | 0.43643850088119507, 198 | 0.7753089666366577, 199 | 0.11147090047597885, 200 | 0.9744853973388672, 201 | 0.8618786334991455, 202 | 0.6927213668823242, 203 | 0.07997503876686096 204 | ], 205 | "adjustedProbabilities": [ 206 | 0.8067222237586975, 207 | 0.9805918335914612, 208 | 0.828681468963623, 209 | 0.998873770236969, 210 | 0.10766022652387619, 211 | 0.9999990463256836, 212 | 0.9961197376251221, 213 | 0.9964505434036255, 214 | 1 215 | ], 216 | "generatedLength": "9", 217 | "stopReason": "STOP_REASON_STOP_PATTERN", 218 | "originalText": "\"abcdefghijklmnopqrstuvwxyz\"\nconst number = 123<|endofmiddle|>" 219 | }, 220 | "range": { 221 | "endOffset": "17", 222 | "startPosition": {}, 223 | "endPosition": { 224 | "col": "17" 225 | } 226 | }, 227 | "source": "COMPLETION_SOURCE_NETWORK", 228 | "completionParts": [ 229 | { 230 | "text": "\"abcdefghijklmnopqrstuvwxyz\"", 231 | "offset": "17", 232 | "type": "COMPLETION_PART_TYPE_INLINE", 233 | "prefix": "const alphabet = " 234 | }, 235 | { 236 | "text": "\"abcdefghijklmnopqrstuvwxyz\"", 237 | "offset": "17", 238 | "type": "COMPLETION_PART_TYPE_INLINE_MASK", 239 | "prefix": "const alphabet = " 240 | }, 241 | { 242 | "text": "const number = 123", 243 | "offset": "17", 244 | "type": "COMPLETION_PART_TYPE_BLOCK" 245 | } 246 | ] 247 | }, 248 | { 249 | "completion": { 250 | "completionId": "714c6488-f5e0-41d3-a0c1-daabe410f694", 251 | "text": "const alphabet = \"abcdefghijklmnopqrstuvwxyz\"", 252 | "stop": "<|endofmiddle|>", 253 | "score": -1.9039497503356135, 254 | "tokens": [ 255 | "1", 256 | "68612", 257 | "1", 258 | "100299" 259 | ], 260 | "decodedTokens": [ 261 | "\"", 262 | "abcdefghijklmnopqrstuvwxyz", 263 | "\"", 264 | "<|endofmiddle|>" 265 | ], 266 | "probabilities": [ 267 | 0.4467959403991699, 268 | 0.5885083675384521, 269 | 0.1405881941318512, 270 | 0.4958511292934418 271 | ], 272 | "adjustedProbabilities": [ 273 | 0.8067222237586975, 274 | 0.9805918335914612, 275 | 0.048803623765707016, 276 | 1 277 | ], 278 | "generatedLength": "4", 279 | "stopReason": "STOP_REASON_STOP_PATTERN", 280 | "originalText": "\"abcdefghijklmnopqrstuvwxyz\"<|endofmiddle|>" 281 | }, 282 | "range": { 283 | "endOffset": "17", 284 | "startPosition": {}, 285 | "endPosition": { 286 | "col": "17" 287 | } 288 | }, 289 | "source": "COMPLETION_SOURCE_NETWORK", 290 | "completionParts": [ 291 | { 292 | "text": "\"abcdefghijklmnopqrstuvwxyz\"", 293 | "offset": "17", 294 | "type": "COMPLETION_PART_TYPE_INLINE", 295 | "prefix": "const alphabet = " 296 | }, 297 | { 298 | "text": "\"abcdefghijklmnopqrstuvwxyz\"", 299 | "offset": "17", 300 | "type": "COMPLETION_PART_TYPE_INLINE_MASK", 301 | "prefix": 
"const alphabet = " 302 | } 303 | ] 304 | }, 305 | { 306 | "completion": { 307 | "completionId": "adac237b-fd67-443a-a5f7-7c9be9e45215", 308 | "text": "const alphabet = 'abcdefghijklmnopqrstuvwxyz'\nconst numbers = '0123456789'", 309 | "stop": "<|endofmiddle|>", 310 | "score": -1.9936787108188172, 311 | "tokens": [ 312 | "6", 313 | "68612", 314 | "1270", 315 | "1040", 316 | "5219", 317 | "284", 318 | "364", 319 | "11531", 320 | "12901", 321 | "17458", 322 | "24", 323 | "6", 324 | "100299" 325 | ], 326 | "decodedTokens": [ 327 | "'", 328 | "abcdefghijklmnopqrstuvwxyz", 329 | "'\n", 330 | "const", 331 | " numbers", 332 | " =", 333 | " '", 334 | "012", 335 | "345", 336 | "678", 337 | "9", 338 | "'", 339 | "<|endofmiddle|>" 340 | ], 341 | "probabilities": [ 342 | 0.2410283237695694, 343 | 0.634307324886322, 344 | 0.4231034517288208, 345 | 0.7778605818748474, 346 | 0.13147321343421936, 347 | 0.9941889047622681, 348 | 0.5793276429176331, 349 | 0.6870170831680298, 350 | 0.9950946569442749, 351 | 0.994903564453125, 352 | 0.9754427671432495, 353 | 0.3961578607559204, 354 | 0.9915995597839355 355 | ], 356 | "adjustedProbabilities": [ 357 | 0.17243333160877228, 358 | 0.9864425659179688, 359 | 0.7749241590499878, 360 | 0.998525083065033, 361 | 0.12466666847467422, 362 | 0.9999997615814209, 363 | 0.866196870803833, 364 | 0.8840391039848328, 365 | 0.9999995231628418, 366 | 1, 367 | 0.9999654293060303, 368 | 0.3785102367401123, 369 | 1 370 | ], 371 | "generatedLength": "13", 372 | "stopReason": "STOP_REASON_STOP_PATTERN", 373 | "originalText": "'abcdefghijklmnopqrstuvwxyz'\nconst numbers = '0123456789'<|endofmiddle|>" 374 | }, 375 | "range": { 376 | "endOffset": "17", 377 | "startPosition": {}, 378 | "endPosition": { 379 | "col": "17" 380 | } 381 | }, 382 | "source": "COMPLETION_SOURCE_NETWORK", 383 | "completionParts": [ 384 | { 385 | "text": "'abcdefghijklmnopqrstuvwxyz'", 386 | "offset": "17", 387 | "type": "COMPLETION_PART_TYPE_INLINE", 388 | "prefix": "const alphabet = " 389 | }, 390 | { 391 | "text": "'abcdefghijklmnopqrstuvwxyz'", 392 | "offset": "17", 393 | "type": "COMPLETION_PART_TYPE_INLINE_MASK", 394 | "prefix": "const alphabet = " 395 | }, 396 | { 397 | "text": "const numbers = '0123456789'", 398 | "offset": "17", 399 | "type": "COMPLETION_PART_TYPE_BLOCK" 400 | } 401 | ] 402 | }, 403 | { 404 | "completion": { 405 | "completionId": "f173d8b9-adf9-4246-8156-c1fa0968c061", 406 | "text": "const alphabet = 'abcdefghijklmnopqrstuvwxyz'\nconst number = 123", 407 | "stop": "<|endofmiddle|>", 408 | "score": -2.0865020598094772, 409 | "tokens": [ 410 | "6", 411 | "68612", 412 | "1270", 413 | "1040", 414 | "1396", 415 | "284", 416 | "220", 417 | "4513", 418 | "100299" 419 | ], 420 | "decodedTokens": [ 421 | "'", 422 | "abcdefghijklmnopqrstuvwxyz", 423 | "'\n", 424 | "const", 425 | " number", 426 | " =", 427 | " ", 428 | "123", 429 | "<|endofmiddle|>" 430 | ], 431 | "probabilities": [ 432 | 0.2410283237695694, 433 | 0.634307324886322, 434 | 0.4231034517288208, 435 | 0.7778605818748474, 436 | 0.09618785977363586, 437 | 0.9696021676063538, 438 | 0.8942249417304993, 439 | 0.6518176198005676, 440 | 0.076181560754776 441 | ], 442 | "adjustedProbabilities": [ 443 | 0.17243333160877228, 444 | 0.9864425659179688, 445 | 0.7749241590499878, 446 | 0.998525083065033, 447 | 0.05707655847072601, 448 | 0.9999984502792358, 449 | 0.9980985522270203, 450 | 0.9948039650917053, 451 | 1 452 | ], 453 | "generatedLength": "9", 454 | "stopReason": "STOP_REASON_STOP_PATTERN", 455 | "originalText": 
"'abcdefghijklmnopqrstuvwxyz'\nconst number = 123<|endofmiddle|>" 456 | }, 457 | "range": { 458 | "endOffset": "17", 459 | "startPosition": {}, 460 | "endPosition": { 461 | "col": "17" 462 | } 463 | }, 464 | "source": "COMPLETION_SOURCE_NETWORK", 465 | "completionParts": [ 466 | { 467 | "text": "'abcdefghijklmnopqrstuvwxyz'", 468 | "offset": "17", 469 | "type": "COMPLETION_PART_TYPE_INLINE", 470 | "prefix": "const alphabet = " 471 | }, 472 | { 473 | "text": "'abcdefghijklmnopqrstuvwxyz'", 474 | "offset": "17", 475 | "type": "COMPLETION_PART_TYPE_INLINE_MASK", 476 | "prefix": "const alphabet = " 477 | }, 478 | { 479 | "text": "const number = 123", 480 | "offset": "17", 481 | "type": "COMPLETION_PART_TYPE_BLOCK" 482 | } 483 | ] 484 | } 485 | ], 486 | "filteredCompletionItems": [ 487 | { 488 | "completion": { 489 | "completionId": "3b5d4475-751d-46b2-b6d3-77bfe0d10bd8", 490 | "text": "const alphabet = \"abcdefghijklmnopqrstuvwxyz\"", 491 | "stop": "\n\n", 492 | "score": -1.5627320636692759, 493 | "tokens": [ 494 | "1", 495 | "68612", 496 | "702", 497 | "1040", 498 | "3187", 499 | "284", 500 | "220", 501 | "4513", 502 | "271" 503 | ], 504 | "decodedTokens": [ 505 | "\"", 506 | "abcdefghijklmnopqrstuvwxyz", 507 | "\"\n", 508 | "const", 509 | " example", 510 | " =", 511 | " ", 512 | "123", 513 | "\n\n" 514 | ], 515 | "probabilities": [ 516 | 0.4467959403991699, 517 | 0.5885083675384521, 518 | 0.43643850088119507, 519 | 0.7753089666366577, 520 | 0.23414745926856995, 521 | 0.9090325832366943, 522 | 0.716852068901062, 523 | 0.8863968253135681, 524 | 0.5485243797302246 525 | ], 526 | "adjustedProbabilities": [ 527 | 0.8067222237586975, 528 | 0.9805918335914612, 529 | 0.828681468963623, 530 | 0.998873770236969, 531 | 0.6884543299674988, 532 | 0.9997839331626892, 533 | 0.9740128517150879, 534 | 0.9997902512550354, 535 | 0.7436034679412842 536 | ], 537 | "generatedLength": "9", 538 | "stopReason": "STOP_REASON_STOP_PATTERN", 539 | "filterReasons": [ 540 | "FILTER_REASON_DUPLICATE" 541 | ], 542 | "originalText": "\"abcdefghijklmnopqrstuvwxyz\"\nconst example = 123\n\n" 543 | }, 544 | "range": { 545 | "endOffset": "17", 546 | "startPosition": {}, 547 | "endPosition": { 548 | "col": "17" 549 | } 550 | }, 551 | "source": "COMPLETION_SOURCE_NETWORK", 552 | "completionParts": [ 553 | { 554 | "text": "\"abcdefghijklmnopqrstuvwxyz\"", 555 | "offset": "17", 556 | "type": "COMPLETION_PART_TYPE_INLINE", 557 | "prefix": "const alphabet = " 558 | }, 559 | { 560 | "text": "\"abcdefghijklmnopqrstuvwxyz\"", 561 | "offset": "17", 562 | "type": "COMPLETION_PART_TYPE_INLINE_MASK", 563 | "prefix": "const alphabet = " 564 | } 565 | ] 566 | }, 567 | { 568 | "completion": { 569 | "completionId": "f9386bed-70fb-4760-8e0c-4ccf4b20a713", 570 | "text": "const alphabet = \"abcdefghijklmnopqrstuvwxyz\"\nconst numbers = \"0123456789\"", 571 | "stop": "<|endofmiddle|>", 572 | "score": -1.7445913846352432, 573 | "tokens": [ 574 | "1", 575 | "68612", 576 | "702", 577 | "1040", 578 | "5219", 579 | "284", 580 | "330", 581 | "11531", 582 | "12901", 583 | "17458", 584 | "24", 585 | "702", 586 | "100299" 587 | ], 588 | "decodedTokens": [ 589 | "\"", 590 | "abcdefghijklmnopqrstuvwxyz", 591 | "\"\n", 592 | "const", 593 | " numbers", 594 | " =", 595 | " \"", 596 | "012", 597 | "345", 598 | "678", 599 | "9", 600 | "\"\n", 601 | "<|endofmiddle|>" 602 | ], 603 | "probabilities": [ 604 | 0.4467959403991699, 605 | 0.5885083675384521, 606 | 0.43643850088119507, 607 | 0.7753089666366577, 608 | 0.13237497210502625, 609 | 0.9959436058998108, 610 | 
0.5902878642082214, 611 | 0.6605963706970215, 612 | 0.9968718886375427, 613 | 0.99399334192276, 614 | 0.9554901123046875, 615 | 0.5480461716651917, 616 | 0.06993699073791504 617 | ], 618 | "adjustedProbabilities": [ 619 | 0.8067222237586975, 620 | 0.9805918335914612, 621 | 0.828681468963623, 622 | 0.998873770236969, 623 | 0.16544978320598602, 624 | 1, 625 | 0.8822909593582153, 626 | 0.8479676246643066, 627 | 0.9999998807907104, 628 | 0.9999998807907104, 629 | 0.9998165965080261, 630 | 0.7556309103965759, 631 | 1 632 | ], 633 | "generatedLength": "13", 634 | "stopReason": "STOP_REASON_STOP_PATTERN", 635 | "filterReasons": [ 636 | "FILTER_REASON_DUPLICATE" 637 | ], 638 | "originalText": "\"abcdefghijklmnopqrstuvwxyz\"\nconst numbers = \"0123456789\"\n<|endofmiddle|>" 639 | }, 640 | "range": { 641 | "endOffset": "17", 642 | "startPosition": {}, 643 | "endPosition": { 644 | "col": "17" 645 | } 646 | }, 647 | "source": "COMPLETION_SOURCE_NETWORK", 648 | "completionParts": [ 649 | { 650 | "text": "\"abcdefghijklmnopqrstuvwxyz\"", 651 | "offset": "17", 652 | "type": "COMPLETION_PART_TYPE_INLINE", 653 | "prefix": "const alphabet = " 654 | }, 655 | { 656 | "text": "\"abcdefghijklmnopqrstuvwxyz\"", 657 | "offset": "17", 658 | "type": "COMPLETION_PART_TYPE_INLINE_MASK", 659 | "prefix": "const alphabet = " 660 | }, 661 | { 662 | "text": "const numbers = \"0123456789\"", 663 | "offset": "17", 664 | "type": "COMPLETION_PART_TYPE_BLOCK" 665 | } 666 | ] 667 | } 668 | ], 669 | "promptId": "1498d309-fca8-4c4d-aac7-5520abfd9856" 670 | }), 671 | ok: true 672 | }) 673 | ) 674 | 675 | const contents = { 676 | contentBefore: "const alphabet = ", 677 | contentAfter: "const example = 123" 678 | } 679 | // const result = await codeium.completion(contents, "file:///app/test.ts", "typescript", 3) 680 | // console.log(result) 681 | // expect(result.length).toEqual(3) 682 | }) 683 | 684 | 685 | -------------------------------------------------------------------------------- /src/providers/codeium.ts: -------------------------------------------------------------------------------- 1 | import { uuid } from "../utils"; 2 | import ApiBase from "../models/api"; 3 | import * as types from "./codeium.types"; 4 | import config from "../config"; 5 | 6 | const languages = { 7 | unspecified: 0, 8 | c: 1, 9 | clojure: 2, 10 | coffeescript: 3, 11 | cpp: 4, 12 | csharp: 5, 13 | css: 6, 14 | cudacpp: 7, 15 | dockerfile: 8, 16 | go: 9, 17 | groovy: 10, 18 | handlebars: 11, 19 | haskell: 12, 20 | hcl: 13, 21 | html: 14, 22 | ini: 15, 23 | java: 16, 24 | javascript: 17, 25 | json: 18, 26 | julia: 19, 27 | kotlin: 20, 28 | latex: 21, 29 | less: 22, 30 | lua: 23, 31 | makefile: 24, 32 | markdown: 25, 33 | objectivec: 26, 34 | objectivecpp: 27, 35 | perl: 28, 36 | php: 29, 37 | plaintext: 30, 38 | protobuf: 31, 39 | pbtxt: 32, 40 | python: 33, 41 | r: 34, 42 | ruby: 35, 43 | rust: 36, 44 | sass: 37, 45 | scala: 38, 46 | scss: 39, 47 | shell: 40, 48 | sql: 41, 49 | starlark: 42, 50 | swift: 43, 51 | tsx: 44, 52 | typescript: 45, 53 | visualbasic: 46, 54 | vue: 47, 55 | xml: 48, 56 | xsl: 49, 57 | yaml: 50, 58 | svelte: 51, 59 | toml: 52, 60 | dart: 53, 61 | rst: 54, 62 | ocaml: 55, 63 | cmake: 56, 64 | pascal: 57, 65 | elixir: 58, 66 | fsharp: 59, 67 | lisp: 60, 68 | matlab: 61, 69 | powershell: 62, 70 | solidity: 63, 71 | ada: 64, 72 | ocaml_interface: 65, 73 | }; 74 | 75 | export default class Codeium extends ApiBase { 76 | sessionId: string; 77 | apiKey: string; 78 | 79 | constructor(apiKey: string = config.codeiumApiKey as string) { 80 
| super({ 81 | url: "https://web-backend.codeium.com", 82 | headers: { 83 | "Content-Type": "application/json", 84 | }, 85 | }); 86 | 87 | this.sessionId = uuid(); 88 | this.apiKey = apiKey; 89 | } 90 | 91 | authUrl(): string { 92 | return `https://codeium.com/profile?response_type=token&redirect_uri=vim-show-auth-token&state=${this.sessionId}&scope=openid%20profile%20email&redirect_parameters_type=query`; 93 | } 94 | 95 | async register(token: string): Promise { 96 | const headers = { 97 | "Content-Type": "application/json", 98 | }; 99 | 100 | const body = { 101 | firebase_id_token: token, 102 | }; 103 | 104 | const data = await this.request({ 105 | method: "POST", 106 | headers, 107 | url: "https://api.codeium.com", 108 | endpoint: "/register_user/", 109 | body, 110 | }); 111 | 112 | return data?.api_key; 113 | } 114 | 115 | async completion( 116 | contents: any, 117 | filepath: string, 118 | languageId: string, 119 | ): Promise { 120 | const headers = { 121 | "Content-Type": "application/json", 122 | Authorization: `Basic ${this.apiKey}-${this.sessionId}`, 123 | }; 124 | 125 | filepath = filepath.replace("file://", ""); 126 | 127 | const body = { 128 | metadata: { 129 | // The editor name needs to be known by codeium 130 | ideName: "web", 131 | ideVersion: "unknown", 132 | // The version needs to a recent one, so codeium accepts it 133 | extensionVersion: "1.6.13", 134 | extensionName: "helix-gpt", 135 | apiKey: this.apiKey, 136 | sessionId: this.sessionId, 137 | }, 138 | document: { 139 | editor_language: languageId, 140 | language: languages[languageId] as number, 141 | cursor_offset: contents.contentBefore.length, 142 | line_ending: "\n", 143 | absolute_path: filepath, 144 | relative_path: filepath, 145 | text: contents.contentBefore + "\n" + contents.contentAfter, 146 | }, 147 | editor_options: { 148 | tab_size: 2, 149 | insert_spaces: true, 150 | }, 151 | other_documents: [], 152 | }; 153 | 154 | const data = await this.request({ 155 | method: "POST", 156 | body, 157 | headers, 158 | endpoint: "/exa.language_server_pb.LanguageServerService/GetCompletions", 159 | }); 160 | 161 | return types.Completion.fromResponse(data).slice(0, config.numSuggestions); 162 | } 163 | } 164 | -------------------------------------------------------------------------------- /src/providers/codeium.types.ts: -------------------------------------------------------------------------------- 1 | import { uniqueStringArray } from "../utils" 2 | 3 | export class Completion extends Array { 4 | constructor(...items: string[]) { 5 | super() 6 | this.push(...uniqueStringArray(items)) 7 | } 8 | 9 | static fromResponse(data: any): Completion { 10 | return data.completionItems?.map(i => i.completion.text) 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /src/providers/github.test.ts: -------------------------------------------------------------------------------- 1 | import { expect, test } from "bun:test"; 2 | import Github from "./github" 3 | 4 | const github = new Github() 5 | 6 | // test("completion", async () => { 7 | // const result = await github.completion("const alphabet = ", "file:///app/test.ts", "typescript", 3) 8 | // console.log(result) 9 | // expect(result.length).toEqual(3) 10 | // }) 11 | 12 | // test("chat", async () => { 13 | // const result = await github.chat("Document this code", "const alphabet = ", "file:///app/test.ts", "typescript", 3) 14 | // console.log(result) 15 | // }) 16 | 
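// A minimal offline sketch, not part of the original suite: instead of mocking fetch
// and a Copilot session, the streamed-response parsing can be exercised directly via
// Completion.fromResponse from github.types.ts. The body below is hypothetical, but it
// follows the `data: {json}` line format that fromResponse expects; `test`/`expect`
// come from the bun:test import at the top of this file.
import { Completion } from "./github.types"

test("Completion.fromResponse joins streamed chunks per choice index", () => {
  const stream = [
    'data: {"choices":[{"index":0,"text":"const alphabet = "}]}',
    'data: {"choices":[{"index":0,"text":"\\"abcdefghijklmnopqrstuvwxyz\\""}]}',
    "data: [DONE]",
  ].join("\n")

  const result = Completion.fromResponse(stream)
  expect(result.length).toEqual(1)
  expect(result[0]).toEqual('const alphabet = "abcdefghijklmnopqrstuvwxyz"')
})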
-------------------------------------------------------------------------------- /src/providers/github.ts: -------------------------------------------------------------------------------- 1 | import { genHexStr, currentUnixTimestamp } from "../utils"; 2 | import ApiBase from "../models/api"; 3 | import * as types from "./github.types"; 4 | import config from "../config"; 5 | 6 | export default class Github extends ApiBase { 7 | copilotSession?: types.CopilotSession; 8 | 9 | constructor() { 10 | super({ 11 | url: "https://github.com", 12 | headers: { 13 | "Content-Type": "application/json", 14 | }, 15 | }); 16 | } 17 | 18 | async deviceCode(): Promise { 19 | const data = await this.request({ 20 | method: "POST", 21 | endpoint: "/login/device/code", 22 | text: true, 23 | params: { 24 | scope: "read:user", 25 | client_id: "Iv1.b507a08c87ecfe98", // copilot ID 26 | }, 27 | }); 28 | 29 | return types.DeviceCode.fromResponse(data); 30 | } 31 | 32 | async accessToken(code: string): Promise { 33 | const data = await this.request({ 34 | method: "POST", 35 | endpoint: "/login/oauth/access_token", 36 | text: true, 37 | params: { 38 | client_id: "Iv1.b507a08c87ecfe98", // copilot ID 39 | device_code: code, 40 | grant_type: "urn:ietf:params:oauth:grant-type:device_code", 41 | }, 42 | }); 43 | 44 | return types.AccessToken.fromResponse(data); 45 | } 46 | 47 | async refreshCopilotSession(): Promise { 48 | if ( 49 | this.copilotSession?.exp && 50 | this.copilotSession?.exp >= currentUnixTimestamp() 51 | ) { 52 | return; 53 | } 54 | 55 | const data = await this.request({ 56 | method: "GET", 57 | url: "https://api.github.com", 58 | endpoint: "/copilot_internal/v2/token", 59 | headers: { 60 | Authorization: `Bearer ${config.copilotApiKey}`, 61 | "Editor-Version": "helix/1.0.0", 62 | "Editor-Plugin-Version": "helix-gpt/1.0.0", 63 | "User-Agent": "helix/1.0.0", 64 | }, 65 | }); 66 | 67 | this.copilotSession = types.CopilotSession.fromResponse(data); 68 | } 69 | 70 | async chat( 71 | request: string, 72 | contents: string, 73 | filepath: string, 74 | languageId: string, 75 | ): Promise { 76 | await this.refreshCopilotSession(); 77 | 78 | const messages = [ 79 | { 80 | content: `You are an AI programming assistant.\nWhen asked for your name, you must respond with \"GitHub Copilot\".\nFollow the user's requirements carefully & to the letter.\n- Each code block starts with \`\`\` and // FILEPATH.\n- You always answer with ${languageId} code.\n- When the user asks you to document something, you must answer in the form of a ${languageId} code block.\nYour expertise is strictly limited to software development topics.\nFor questions not related to software development, simply give a reminder that you are an AI programming assistant.\nKeep your answers short and impersonal.`, 81 | role: "system", 82 | }, 83 | { 84 | content: `I have the following code in the selection:\n\`\`\`${languageId}\n// FILEPATH: ${filepath.replace("file://", "")}\n${contents}`, 85 | role: "user", 86 | }, 87 | { 88 | content: request, 89 | role: "user", 90 | }, 91 | ]; 92 | 93 | const body = { 94 | intent: true, 95 | max_tokens: 7909, 96 | model: "gpt-4", 97 | n: 1, 98 | stream: false, 99 | temperature: 0.1, 100 | top_p: 1, 101 | messages, 102 | }; 103 | 104 | const headers = { 105 | "Content-Type": "application/json; charset=utf-8", 106 | "User-Agent": "helix/1.0.0", 107 | Authorization: `Bearer ${this.copilotSession?.raw}`, 108 | "Editor-Plugin-Version": "copilot-chat/0.24.1", 109 | "Editor-Version": "vscode/1.99", 110 | "Openai-Intent": 
"conversation-panel", 111 | "Openai-Organization": "github-copilot", 112 | "VScode-MachineId": genHexStr(64), 113 | "VScode-SessionId": 114 | genHexStr(8) + 115 | "-" + 116 | genHexStr(4) + 117 | "-" + 118 | genHexStr(4) + 119 | "-" + 120 | genHexStr(4) + 121 | "-" + 122 | genHexStr(25), 123 | "X-Request-Id": 124 | genHexStr(8) + 125 | "-" + 126 | genHexStr(4) + 127 | "-" + 128 | genHexStr(4) + 129 | "-" + 130 | genHexStr(4) + 131 | "-" + 132 | genHexStr(12), 133 | "Accept-Encoding": "gzip,deflate,br", 134 | Accept: "*/*", 135 | Connection: "close", 136 | }; 137 | 138 | const data = await this.request({ 139 | method: "POST", 140 | body, 141 | headers, 142 | url: "https://api.githubcopilot.com", 143 | endpoint: "/chat/completions", 144 | }); 145 | 146 | return types.Chat.fromResponse(data, filepath, languageId); 147 | } 148 | 149 | async completion( 150 | contents: any, 151 | filepath: string, 152 | languageId: string, 153 | ): Promise { 154 | await this.refreshCopilotSession(); 155 | 156 | const headers = { 157 | "Content-Type": "application/json; charset=utf-8", 158 | "User-Agent": "helix/1.0.0", 159 | Authorization: `Bearer ${this.copilotSession?.raw}`, 160 | "Editor-Plugin-Version": "copilot-chat/0.24.1", 161 | "Editor-Version": "vscode/1.99", 162 | "Openai-Intent": "copilot-ghost", 163 | "Openai-Organization": "github-copilot", 164 | "VScode-MachineId": genHexStr(64), 165 | "VScode-SessionId": 166 | genHexStr(8) + 167 | "-" + 168 | genHexStr(4) + 169 | "-" + 170 | genHexStr(4) + 171 | "-" + 172 | genHexStr(4) + 173 | "-" + 174 | genHexStr(25), 175 | "X-Request-Id": 176 | genHexStr(8) + 177 | "-" + 178 | genHexStr(4) + 179 | "-" + 180 | genHexStr(4) + 181 | "-" + 182 | genHexStr(4) + 183 | "-" + 184 | genHexStr(12), 185 | "Accept-Encoding": "gzip,deflate,br", 186 | Accept: "*/*", 187 | }; 188 | 189 | const body = { 190 | extra: { 191 | language: languageId, 192 | next_indent: 0, 193 | prompt_tokens: 500, 194 | suffix_tokens: 400, 195 | trim_by_indentation: true, 196 | }, 197 | max_tokens: 500, 198 | n: config.numSuggestions, 199 | nwo: "app", 200 | prompt: `// Path: ${filepath.replace("file://", "")}\n${contents.contentBefore}`, 201 | stop: ["\n\n"], 202 | stream: true, 203 | suffix: contents.contentAfter, 204 | temperature: config.numSuggestions > 1 ? 
0.4 : 0, 205 | top_p: 1, 206 | }; 207 | 208 | const data = await this.request({ 209 | method: "POST", 210 | body, 211 | headers, 212 | text: true, 213 | url: "https://proxy.individual.githubcopilot.com", 214 | endpoint: "/v1/engines/gpt-4o-copilot/completions", 215 | }); 216 | 217 | return types.Completion.fromResponse(data); 218 | } 219 | } 220 | -------------------------------------------------------------------------------- /src/providers/github.types.ts: -------------------------------------------------------------------------------- 1 | import { uniqueStringArray, parseQuery, parseQueryStringToken, extractCodeBlock } from "../utils" 2 | 3 | export class DeviceCode { 4 | deviceCode: string; 5 | userCode: string; 6 | verificationUri: string; 7 | expiresIn: number; 8 | interval: number; 9 | message: string; 10 | 11 | constructor(query: any) { 12 | this.deviceCode = query.device_code 13 | this.userCode = query.user_code 14 | this.verificationUri = query.verification_uri 15 | this.expiresIn = query.expires_in 16 | this.interval = query.interval 17 | this.message = query.message 18 | } 19 | 20 | static fromResponse(data: string): DeviceCode { 21 | const query = parseQuery(data) 22 | return new DeviceCode(query) 23 | } 24 | } 25 | 26 | export class AccessToken { 27 | accessToken?: string; 28 | tokenType?: string; 29 | scope?: string; 30 | 31 | constructor(query: any) { 32 | this.accessToken = query.access_token 33 | this.tokenType = query.token_type 34 | this.scope = query.scope 35 | } 36 | 37 | static fromResponse(data: string): AccessToken { 38 | const query = parseQuery(data) 39 | return new AccessToken(query) 40 | } 41 | } 42 | 43 | export class CopilotSession { 44 | exp: number 45 | raw: string 46 | 47 | constructor(query: any) { 48 | this.exp = query.exp 49 | this.raw = query.raw 50 | } 51 | 52 | static fromResponse(data: any): CopilotSession { 53 | const parsedToken = parseQueryStringToken(data?.token) 54 | 55 | return new CopilotSession({ 56 | exp: parseInt(parsedToken.exp), 57 | raw: data?.token 58 | }) 59 | } 60 | } 61 | 62 | export class Completion extends Array { 63 | constructor(...items: string[]) { 64 | super(); 65 | this.push(...uniqueStringArray(items)); 66 | } 67 | 68 | static fromResponse(text: string): Completion { 69 | const data = text.split('\n').map(i => i.slice(5)).map((i) => { 70 | try { 71 | return JSON.parse(i).choices[0] 72 | } catch (e) { return null } 73 | }).filter(i => i).reduce(function(r, a) { 74 | r[a.index] = r[a.index] || []; 75 | r[a.index].push(a); 76 | return r; 77 | }, Object.create(null)) 78 | 79 | const items = Object.values(data).map((i) => i.map(i => i.text).join('')) 80 | return new Completion(...items as string[]) 81 | } 82 | } 83 | 84 | export class Chat { 85 | 86 | result: string 87 | 88 | constructor(data: string) { 89 | this.result = data 90 | } 91 | 92 | static fromResponse(data: any, filepath: string, language: string): Chat { 93 | const choices = data?.choices?.map(i => i.message?.content) 94 | const result = extractCodeBlock(filepath, choices[0], language) 95 | return new Chat(result as string) 96 | } 97 | } 98 | -------------------------------------------------------------------------------- /src/providers/ollama.test.ts: -------------------------------------------------------------------------------- 1 | import { expect, test, mock, jest } from "bun:test"; 2 | import Ollama from "./ollama"; 3 | 4 | const ollama = new Ollama(); 5 | 6 | test("completion", async () => { 7 | global.fetch = jest.fn(() => 8 | Promise.resolve({ 9 | json: () 
=> 10 | Promise.resolve({ 11 | message: { 12 | role: "assistant", 13 | content: 14 | 'const name: string = "John";\nconsole.log("Hello, " + name);', 15 | }, 16 | }), 17 | ok: true, 18 | }), 19 | ); 20 | 21 | const result = await ollama.completion( 22 | "test", 23 | "file:///app/test.ts", 24 | "typescript", 25 | 3, 26 | ); 27 | expect(result).toEqual([ 28 | 'const name: string = "John";\nconsole.log("Hello, " + name);' 29 | ]); 30 | expect(result.length).toEqual(1); 31 | }); 32 | 33 | test("chat", async () => { 34 | global.fetch = jest.fn(() => 35 | Promise.resolve({ 36 | json: () => 37 | Promise.resolve({ 38 | message: { 39 | role: "assistant", 40 | content: `\`\`\`typescript 41 | // FILEPATH: /app/test.ts 42 | 43 | if (config.authCopilot) { 44 | process.exit(0) 45 | } 46 | \`\`\``, 47 | }, 48 | }), 49 | ok: true, 50 | }), 51 | ); 52 | 53 | const { result } = await ollama.chat( 54 | "test", 55 | "test", 56 | "file:///app/test.ts", 57 | "typescript", 58 | ); 59 | expect(result).toEqual("\nif (config.authCopilot) {\n process.exit(0)\n}\n"); 60 | }); 61 | -------------------------------------------------------------------------------- /src/providers/ollama.ts: -------------------------------------------------------------------------------- 1 | import ApiBase from "../models/api"; 2 | import * as types from "./ollama.types"; 3 | import config from "../config"; 4 | import { log } from "../utils"; 5 | 6 | export default class Ollama extends ApiBase { 7 | private timeout: number; 8 | private model: string; 9 | 10 | constructor() { 11 | super({ 12 | url: config.ollamaEndpoint as string, 13 | headers: { 14 | "Content-Type": "application/json", 15 | }, 16 | }); 17 | this.timeout = parseInt(config.ollamaTimeout, 10); 18 | this.model = config.ollamaModel; 19 | } 20 | 21 | async chat( 22 | request: string, 23 | contents: string, 24 | filepath: string, 25 | languageId: string, 26 | ): Promise { 27 | const messages = [ 28 | { 29 | content: `You are an AI programming assistant.\nWhen asked for your name, you must respond with \"GitHub Copilot\".\nFollow the user's requirements carefully & to the letter.\n- Each code block starts with \`\`\` and // FILEPATH.\n- You always answer with ${languageId} code.\n- When the user asks you to document something, you must answer in the form of a ${languageId} code block.\nYour expertise is strictly limited to software development topics.\nFor questions not related to software development, simply give a reminder that you are an AI programming assistant.\nKeep your answers short and impersonal.`, 30 | role: "system", 31 | }, 32 | { 33 | content: `I have the following code in the selection:\n\`\`\`${languageId}\n// FILEPATH: ${filepath.replace("file://", "")}\n${contents}`, 34 | role: "user", 35 | }, 36 | { 37 | content: request, 38 | role: "user", 39 | }, 40 | ]; 41 | 42 | log( 43 | "prompt", 44 | messages.map((m) => `role: ${m.role}\n${m.content}`).join("\n"), 45 | ); 46 | 47 | const body = { 48 | model: this.model, 49 | stream: false, 50 | messages, 51 | }; 52 | 53 | const data = await this.request({ 54 | method: "POST", 55 | body, 56 | endpoint: "/api/chat", 57 | timeout: this.timeout, 58 | }); 59 | 60 | log("content", data.message.content); 61 | 62 | return types.Chat.fromResponse(data, filepath, languageId); 63 | } 64 | 65 | async completion( 66 | contents: any, 67 | filepath: string, 68 | languageId: string, 69 | ): Promise { 70 | const messages = [ 71 | { 72 | role: "system", 73 | content: 74 | config.ollamaContext?.replace("", languageId) + 75 | "\n\n" + 76 | 
`End of file context:\n\n${contents.contentAfter}`, 77 | }, 78 | { 79 | role: "user", 80 | content: `Start of file context:\n\n${contents.contentBefore}`, 81 | }, 82 | ]; 83 | 84 | log( 85 | "prompt", 86 | messages.map((m) => `role: ${m.role}\n${m.content}`).join("\n"), 87 | ); 88 | 89 | const body = { 90 | model: this.model, 91 | stream: false, 92 | messages, 93 | }; 94 | 95 | const data = await this.request({ 96 | method: "POST", 97 | body, 98 | endpoint: "/api/chat", 99 | timeout: this.timeout, 100 | }); 101 | 102 | log("content", data.message.content); 103 | 104 | return types.Completion.fromResponse(data); 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /src/providers/ollama.types.ts: -------------------------------------------------------------------------------- 1 | import { uniqueStringArray, extractCodeBlock, log } from "../utils"; 2 | 3 | export class Completion extends Array { 4 | constructor(...items: string[]) { 5 | super(); 6 | this.push(...uniqueStringArray(items)); 7 | } 8 | 9 | static fromResponse(data: any): Completion { 10 | return new Completion(data.message.content as string); 11 | } 12 | } 13 | 14 | export class Chat { 15 | result: string; 16 | 17 | constructor(data: string) { 18 | this.result = data; 19 | } 20 | 21 | static fromResponse(data: any, filepath: string, language: string): Chat { 22 | const content = data.message.content as string; 23 | const result = extractCodeBlock(filepath, content, language); 24 | return new Chat(result as string); 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /src/providers/openai.test.ts: -------------------------------------------------------------------------------- 1 | import { expect, test, mock, jest } from "bun:test"; 2 | import Openai from "./openai" 3 | 4 | const openai = new Openai() 5 | 6 | test("completion", async () => { 7 | global.fetch = jest.fn(() => 8 | Promise.resolve({ 9 | json: () => Promise.resolve({ 10 | "choices": [ 11 | { 12 | "index": 0, 13 | "message": { 14 | "role": "assistant", 15 | "content": "const name: string = \"John\";\nconsole.log(\"Hello, \" + name);" 16 | }, 17 | }, 18 | { 19 | "index": 1, 20 | "message": { 21 | "role": "assistant", 22 | "content": "console.log(\"Hello, world!\");" 23 | }, 24 | }, 25 | { 26 | "index": 2, 27 | "message": { 28 | "role": "assistant", 29 | "content": "const name: string = \"John\";\nconsole.log(\"Hello, \" + name);" 30 | }, 31 | } 32 | ], 33 | }), 34 | ok: true 35 | }) 36 | ); 37 | 38 | const result = await openai.completion("test", "file:///app/test.ts", "typescript", 3) 39 | expect(result).toEqual(["const name: string = \"John\";\nconsole.log(\"Hello, \" + name);", "console.log(\"Hello, world!\");"]) 40 | expect(result.length).toEqual(2) 41 | }) 42 | 43 | test("chat", async () => { 44 | global.fetch = jest.fn(() => 45 | Promise.resolve({ 46 | json: () => Promise.resolve({ 47 | "choices": [ 48 | { 49 | "index": 0, 50 | "message": { 51 | "role": "assistant", 52 | "content": `\`\`\`typescript 53 | // FILEPATH: /app/test.ts 54 | 55 | if (config.authCopilot) { 56 | process.exit(0) 57 | } 58 | \`\`\`` 59 | }, 60 | }, 61 | ], 62 | }), 63 | ok: true 64 | }) 65 | ); 66 | 67 | const { result } = await openai.chat("test", "test", "file:///app/test.ts", "typescript") 68 | expect(result).toEqual("\nif (config.authCopilot) {\n process.exit(0)\n}\n") 69 | }) 70 | 71 | -------------------------------------------------------------------------------- /src/providers/openai.ts: 
-------------------------------------------------------------------------------- 1 | import ApiBase from "../models/api"; 2 | import * as types from "./openai.types"; 3 | import config from "../config"; 4 | import { log } from "../utils"; 5 | 6 | export default class Openai extends ApiBase { 7 | constructor() { 8 | super({ 9 | url: config.openaiEndpoint as string, 10 | headers: { 11 | "Content-Type": "application/json", 12 | Authorization: `Bearer ${config.openaiKey}`, 13 | }, 14 | }); 15 | } 16 | 17 | async chat( 18 | request: string, 19 | contents: string, 20 | filepath: string, 21 | languageId: string, 22 | ): Promise<types.Chat> { 23 | const messages = [ 24 | { 25 | content: `You are an AI programming assistant.\nWhen asked for your name, you must respond with \"GitHub Copilot\".\nFollow the user's requirements carefully & to the letter.\n- Each code block starts with \`\`\` and // FILEPATH.\n- You always answer with ${languageId} code.\n- When the user asks you to document something, you must answer in the form of a ${languageId} code block.\nYour expertise is strictly limited to software development topics.\nFor questions not related to software development, simply give a reminder that you are an AI programming assistant.\nKeep your answers short and impersonal.`, 26 | role: "system", 27 | }, 28 | { 29 | content: `I have the following code in the selection:\n\`\`\`${languageId}\n// FILEPATH: ${filepath.replace("file://", "")}\n${contents}`, 30 | role: "user", 31 | }, 32 | { 33 | content: request, 34 | role: "user", 35 | }, 36 | ]; 37 | 38 | const body = { 39 | max_tokens: 7909, 40 | model: "gpt-4", 41 | n: 1, 42 | stream: false, 43 | temperature: 0.1, 44 | top_p: 1, 45 | messages, 46 | }; 47 | 48 | const data = await this.request({ 49 | method: "POST", 50 | body, 51 | endpoint: "/v1/chat/completions", 52 | timeout: 10000, 53 | }); 54 | 55 | return types.Chat.fromResponse(data, filepath, languageId); 56 | } 57 | 58 | async completion( 59 | contents: any, 60 | filepath: string, 61 | languageId: string, 62 | ): Promise<types.Completion> { 63 | const messages = [ 64 | { 65 | role: "system", 66 | content: 67 | config.openaiContext?.replace("", languageId) + 68 | "\n\n" + 69 | `End of file context:\n\n${contents.contentAfter}`, 70 | }, 71 | { 72 | role: "user", 73 | content: `Start of file context:\n\n${contents.contentBefore}`, 74 | }, 75 | ]; 76 | 77 | const body = { 78 | model: config.openaiModel, 79 | max_tokens: parseInt(config.openaiMaxTokens as string), 80 | n: config.numSuggestions, 81 | temperature: config.numSuggestions > 1 ?
0.4 : 0, 82 | top_p: 1, 83 | frequency_penalty: 1, 84 | presence_penalty: 2, 85 | messages, 86 | }; 87 | 88 | const data = await this.request({ 89 | method: "POST", 90 | body, 91 | endpoint: "/v1/chat/completions", 92 | }); 93 | 94 | return types.Completion.fromResponse(data); 95 | } 96 | } 97 | -------------------------------------------------------------------------------- /src/providers/openai.types.ts: -------------------------------------------------------------------------------- 1 | import { uniqueStringArray, extractCodeBlock, log } from "../utils" 2 | 3 | export class Completion extends Array { 4 | constructor(...items: string[]) { 5 | super(); 6 | this.push(...uniqueStringArray(items)); 7 | } 8 | 9 | static fromResponse(data: any): Completion { 10 | const choices = data?.choices?.map(i => i.message.content) 11 | return new Completion(...choices) 12 | } 13 | } 14 | 15 | export class Chat { 16 | 17 | result: string 18 | 19 | constructor(data: string) { 20 | this.result = data 21 | } 22 | 23 | static fromResponse(data: any, filepath: string, language: string): Chat { 24 | const choices = data?.choices?.map(i => i.message?.content) 25 | const result = extractCodeBlock(filepath, choices[0], language) 26 | return new Chat(result as string) 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /src/utils.test.ts: -------------------------------------------------------------------------------- 1 | import { expect, test } from "bun:test"; 2 | import { extractCodeBlock, parseQuery, getContent } from "./utils" 3 | 4 | test("extractCodeBlock", async () => { 5 | const content = `\`\`\`typescript 6 | // FILEPATH: /app/example.ts 7 | 8 | /** 9 | * Generates a random string. 10 | * @returns {string} A random string. 11 | */ 12 | const randomString = () => Math.random().toString(36).substring(7); 13 | \`\`\`` 14 | 15 | const result = extractCodeBlock("/app/example.ts", content, "typescript") 16 | expect(result).toEqual(` 17 | /** 18 | * Generates a random string. 19 | * @returns {string} A random string. 
20 | */ 21 | const randomString = () => Math.random().toString(36).substring(7); 22 | `) 23 | }) 24 | 25 | test("parseQuery", () => { 26 | const result = parseQuery("q=hello&lang=typescript&limit=10") 27 | 28 | expect(result).toEqual({ 29 | q: "hello", 30 | lang: "typescript", 31 | limit: "10" 32 | }) 33 | }) 34 | 35 | test("getContent", async () => { 36 | const content = "import example from './test'\n\nconst alphabet = \nconst example2 = 123" 37 | const { lastLine, contentBefore, contentAfter } = await getContent(content, 2, 17) 38 | expect(lastLine).toEqual("const alphabet = ") 39 | expect(contentBefore).toEqual("import example from './test'\n\nconst alphabet = ") 40 | expect(contentAfter).toEqual("const example2 = 123") 41 | }) 42 | 43 | 44 | -------------------------------------------------------------------------------- /src/utils.ts: -------------------------------------------------------------------------------- 1 | import config from "./config" 2 | import crypto from "crypto" 3 | import fs from "fs" 4 | 5 | const debounces: Record = {} 6 | 7 | export const debounce = (key: string, fn: () => void, timeoutMs: number) => { 8 | if (debounces[key]) clearTimeout(debounces[key]) 9 | debounces[key] = setTimeout(fn, timeoutMs) 10 | } 11 | 12 | export const parseQuery = (queryString: string) => { 13 | const params = new URLSearchParams(queryString); 14 | return Object.fromEntries(params.entries()); 15 | } 16 | 17 | export const genHexStr = (length: number) => { 18 | const bytes = crypto.randomBytes(length / 2); 19 | return bytes.toString('hex'); 20 | } 21 | 22 | export const uuid = () => { 23 | return genHexStr(8) + "-" + genHexStr(4) + "-" + genHexStr(4) + "-" + genHexStr(4) + "-" + genHexStr(12); 24 | } 25 | 26 | export const getContent = async (contents: string, line: number, column: number) => { 27 | const lines = contents?.split('\n').slice(0, line + 1) 28 | lines[lines.length - 1] = lines[lines.length - 1].split('').slice(0, column).join('') 29 | const lastLine = lines[lines.length - 1] 30 | const contentBefore = lines.join('\n') 31 | const contentAfter = contents?.split('\n').slice(line + 1).join('\n') 32 | const lastCharacter = contentBefore.slice(-1) 33 | const contentImmediatelyAfter = contents?.split('\n')[line].slice(column) 34 | return { contentBefore, contentAfter, lastCharacter, lastLine, contentImmediatelyAfter } 35 | } 36 | 37 | let logStream: fs.WriteStream | undefined 38 | 39 | export const log = (...args: any) => { 40 | if (!config.logFile) return 41 | 42 | if (Bun.env.TEST_RUNNER) { 43 | console.log(xlog(...args)) 44 | } else if (config.logFile?.length) { 45 | if (!logStream) logStream = fs.createWriteStream(config.logFile) 46 | 47 | try { 48 | logStream.write(xlog(...args) + "\n\n") 49 | } catch (e) { } 50 | } 51 | } 52 | 53 | export const xlog = (...args: any) => { 54 | let newArgs = []; 55 | 56 | args.forEach((arg) => { 57 | newArgs.push(arg); 58 | newArgs.push("|"); 59 | }); 60 | 61 | newArgs = newArgs.slice(0, newArgs.length - 1); 62 | return ["APP", new Date().toISOString(), "-->", ...newArgs].join(' ') 63 | }; 64 | 65 | export const uniqueStringArray = (array: string[]): string[] => { 66 | return Array.from(new Set(array)); 67 | }; 68 | 69 | export const parseQueryStringToken = (input: string): Record => { 70 | if (!input?.length) return {} 71 | const record: Record = {}; 72 | const pairs = input.split(";"); 73 | 74 | for (const pair of pairs) { 75 | const [key, value] = pair.split("="); 76 | record[key] = value; 77 | } 78 | 79 | return record; 80 | } 81 | 82 | 
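// Illustrative example for parseQueryStringToken above (made-up values):
// CopilotSession.fromResponse in github.types.ts passes the Copilot token through
// this parser and reads `exp` from the result.
//
//   parseQueryStringToken("exp=1700000000;sku=free")
//   // => { exp: "1700000000", sku: "free" }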
export const currentUnixTimestamp = () => { 83 | return Math.floor(Date.now() / 1000) 84 | } 85 | 86 | export const extractCodeBlock = (filepath: string, text: string, language: string): string | undefined => { 87 | const pattern = new RegExp(`\`\`\`${language}([\\s\\S]*?)\`\`\``, 'g'); 88 | let match; 89 | const blocks: string[] = []; 90 | 91 | while ((match = pattern.exec(text)) !== null) { 92 | blocks.push(match[0]); 93 | } 94 | 95 | const result = blocks[0]; 96 | if (!result?.length) return 97 | const lines = result?.replace(`// FILEPATH: ${filepath.replace('file://', '')}\n`, '')?.split('\n'); 98 | return lines?.slice(1, lines.length - 1)?.join('\n') + "\n"; 99 | } 100 | 101 | export const bytesToString = (bytes: number): string => { 102 | if (bytes < 1024) return bytes + " B" 103 | else if (bytes < 1024 * 1024) return (bytes / 1024).toFixed(2) + " KB" 104 | else if (bytes < 1024 * 1024 * 1024) return (bytes / 1024 / 1024).toFixed(2) + " MB" 105 | else return (bytes / 1024 / 1024 / 1024).toFixed(2) + " GB" 106 | } 107 | 108 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "lib": ["ESNext"], 4 | "target": "ESNext", 5 | "module": "ESNext", 6 | "moduleDetection": "force", 7 | "jsx": "react-jsx", 8 | "allowJs": true, 9 | 10 | /* Bundler mode */ 11 | "moduleResolution": "bundler", 12 | "allowImportingTsExtensions": true, 13 | "verbatimModuleSyntax": true, 14 | "noEmit": true, 15 | 16 | /* Linting */ 17 | "skipLibCheck": true, 18 | "strict": true, 19 | "noFallthroughCasesInSwitch": true, 20 | "forceConsistentCasingInFileNames": true 21 | } 22 | } 23 | --------------------------------------------------------------------------------