├── .dockerignore
├── .github
│   ├── dependabot.yml
│   └── workflows
│       ├── auto-commit-build.yml
│       └── ci.yml
├── .gitignore
├── api
│   └── handler.mjs
├── assets
│   ├── demo.png
│   └── vision-demo.jpeg
├── biome.jsonc
├── build.mjs
├── changelog.md
├── deno.jsonc
├── deno.lock
├── dist
│   ├── main_bun.mjs
│   ├── main_cloudflare-workers.mjs
│   ├── main_deno.mjs
│   └── main_node.mjs
├── docker
│   ├── bun.Dockerfile
│   ├── deno.Dockerfile
│   └── node.Dockerfile
├── fly.toml
├── generate-opeapi-types.ts
├── license
├── main_bun.ts
├── main_cloudflare-workers.ts
├── main_deno.ts
├── main_node.ts
├── package.json
├── readme.md
├── src
│   ├── app.ts
│   ├── gemini-api-client
│   │   ├── errors.ts
│   │   ├── gemini-api-client.ts
│   │   ├── response-helper.ts
│   │   └── types.ts
│   ├── gemini-proxy.ts
│   ├── generated-types
│   │   ├── gemini-types.ts
│   │   └── openai-types.ts
│   ├── hello.ts
│   ├── log.ts
│   ├── openai
│   │   ├── chat
│   │   │   └── completions
│   │   │       ├── ChatProxyHandler.ts
│   │   │       ├── NonStreamingChatProxyHandler.ts
│   │   │       └── StreamingChatProxyHandler.ts
│   │   ├── embeddingProxyHandler.ts
│   │   └── models.ts
│   ├── reset.d.ts
│   ├── types.ts
│   └── utils.ts
├── test
│   ├── chat-completion_test.ts
│   ├── get_token_test.ts
│   ├── mock-fetch.ts
│   ├── models_test.ts
│   └── test-data.ts
├── tsconfig.json
├── vercel.json
└── zbpack.json

--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
1 | .env
2 | node_modules/
3 | .idea/
4 | .vscode/
5 | 
6 | bun.lockb
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 | updates:
3 |   - package-ecosystem: github-actions
4 |     directory: /
5 |     schedule:
6 |       interval: monthly
7 | 
8 |   - package-ecosystem: npm
9 |     directory: /
10 |     schedule:
11 |       interval: monthly
12 | 
--------------------------------------------------------------------------------
/.github/workflows/auto-commit-build.yml:
--------------------------------------------------------------------------------
1 | name: Auto Commit Build Artifact
2 | 
3 | permissions:
4 |   contents: write
5 |   packages: write
6 | 
7 | on:
8 |   push:
9 |     branches:
10 |       - main
11 |   workflow_dispatch:
12 | 
13 | jobs:
14 |   test:
15 |     name: Test on deno and ${{ matrix.os }}
16 |     runs-on: ${{ matrix.os }}
17 |     env:
18 |       ACTIONS_ALLOW_UNSECURE_COMMANDS: true
19 |     strategy:
20 |       matrix:
21 |         os: [
22 |             #
23 |             ubuntu-latest,
24 |             # windows-latest,
25 |             # macOS-latest,
26 |           ]
27 | 
28 |     steps:
29 |       - run: git config --global core.autocrlf false
30 | 
31 |       - uses: actions/checkout@v4
32 | 
33 |       - name: Setup Deno
34 |         uses: denoland/setup-deno@v2
35 | 
36 |       - name: Run test
37 |         run: deno task test-cov
38 | 
39 |       - name: Run build
40 |         run: deno task build:deno
41 | 
42 |       - name: Coveralls
43 |         uses: coverallsapp/github-action@v2
44 |         with:
45 |           files: coverage/lcov.info
46 |           fail-on-error: false
47 | 
48 |       - uses: EndBug/add-and-commit@v9 # You can change this to use a specific version.
49 |         with:
50 |           add: "dist"
51 |           # Whether to push the commit and, if any, its tags to the repo.
          # It can also be used to set the git push arguments (see the action's README for more info)
52 |           # Default: true
53 |           push: true
54 |           default_author: github_actions
55 |           committer_name: GitHub Actions
56 |           committer_email: actions@github.com
57 |           message: "🚨 Commit Build Artifact from GitHub Actions"
58 | 
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: CI
2 | 
3 | permissions:
4 |   contents: read
5 |   packages: write
6 | 
7 | on:
8 |   push:
9 |   workflow_dispatch:
10 |   pull_request:
11 | 
12 | env:
13 |   IMAGE_TAG: ${{ github.ref_name }}
14 |   IMAGE_NAME: ghcr.io/${{ github.repository }}
15 | 
16 | jobs:
17 |   build-image:
18 |     strategy:
19 |       fail-fast: false
20 |       matrix:
21 |         include:
22 |           - runtime: deno
23 |           - runtime: node
24 |           - runtime: bun
25 |     runs-on: ubuntu-latest
26 |     steps:
27 |       - name: Checkout
28 |         uses: actions/checkout@v4
29 |       - name: Set up QEMU
30 |         uses: docker/setup-qemu-action@v3
31 |       - name: Set up Docker Buildx
32 |         uses: docker/setup-buildx-action@v3
33 |       - name: Login to GitHub Container Registry
34 |         uses: docker/login-action@v3
35 |         with:
36 |           registry: ghcr.io
37 |           username: ${{ github.repository_owner }}
38 |           password: ${{ secrets.GITHUB_TOKEN }}
39 |       - uses: actions/github-script@v7
40 |         id: set-tag-name
41 |         with:
42 |           script: return '${{ github.ref_name }}'.toLowerCase().replaceAll(/[/.]/g, '-').replace(/^-+|-+$/g, '')
43 |           result-encoding: string
44 |       - name: Get result
45 |         run: echo "${{steps.set-tag-name.outputs.result}}"
46 |       - name: Build and push
47 |         uses: docker/build-push-action@v6
48 |         with:
49 |           context: .
50 |           file: docker/${{ matrix.runtime }}.Dockerfile
51 |           platforms: linux/amd64,linux/arm64
52 |           push: ${{ startsWith(github.ref, 'refs/tags/v') }}
53 |           provenance: false
54 |           tags: |
55 |             ${{ env.IMAGE_NAME }}:${{ steps.set-tag-name.outputs.result }}_${{ matrix.runtime }}
56 |             ${{ env.IMAGE_NAME }}:${{ matrix.runtime }}
57 |   biome-ci-check:
58 |     name: biome check
59 |     runs-on: ubuntu-latest
60 |     steps:
61 |       - name: Checkout
62 |         uses: actions/checkout@v4
63 |       - name: Setup Biome CLI
64 |         uses: biomejs/setup-biome@v2
65 |       - name: Run Biome
66 |         run: biome ci --reporter=github .
67 |   test:
68 |     runs-on: ubuntu-latest
69 |     steps:
70 |       - uses: actions/checkout@v4
71 |       - name: Setup Deno
72 |         uses: denoland/setup-deno@v2
73 |       - name: Run test
74 |         run: deno task test-cov
75 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .env
2 | node_modules/
3 | .idea/
4 | .vscode/
5 | .zed/
6 | 
7 | bun.lockb
8 | pnpm-lock.yaml
9 | 
10 | .DS_Store
11 | .wrangler/
12 | 
13 | coverage/
14 | vendor/
15 | 
--------------------------------------------------------------------------------
/api/handler.mjs:
--------------------------------------------------------------------------------
1 | import worker from "../dist/main_cloudflare-workers.mjs"
2 | 
3 | export default worker.fetch
4 | 
5 | export const config = {
6 |   runtime: "edge",
7 |   // Available languages and regions for Google AI Studio and Gemini API
8 |   // https://ai.google.dev/available_regions#available_regions
9 |   // https://vercel.com/docs/concepts/edge-network/regions
10 |   regions: [
11 |     //"arn1",
12 |     "bom1",
13 |     //"cdg1",
14 |     "cle1",
15 |     "cpt1",
16 |     //"dub1",
17 |     //"fra1",
18 |     "gru1",
19 |     //"hkg1",
20 |     "hnd1",
21 |     "iad1",
22 |     "icn1",
23 |     "kix1",
24 |     "pdx1",
25 |     "sfo1",
26 |     "sin1",
27 |     "syd1",
28 |   ],
29 | }
30 | 
--------------------------------------------------------------------------------
/assets/demo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zuisong/gemini-openai-proxy/568ac2abadad5e3e63091df67df4a7726abfa879/assets/demo.png
--------------------------------------------------------------------------------
/assets/vision-demo.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zuisong/gemini-openai-proxy/568ac2abadad5e3e63091df67df4a7726abfa879/assets/vision-demo.jpeg
--------------------------------------------------------------------------------
/biome.jsonc:
--------------------------------------------------------------------------------
1 | {
2 |   "$schema": "https://biomejs.dev/schemas/1.9.4/schema.json",
3 |   "organizeImports": {
4 |     "enabled": true
5 |   },
6 |   "linter": {
7 |     "enabled": true,
8 |     "rules": {
9 |       "recommended": true,
10 |       "nursery": {
11 |         "noEnum": "error"
12 |       },
13 |       "style": {
14 |         "noParameterProperties": "error",
15 |         "useNamingConvention": "off",
16 |         "noUnusedTemplateLiteral": "off"
17 |       }
18 |     }
19 |   },
20 |   "formatter": {
21 |     "useEditorconfig": true,
22 |     "indentWidth": 2,
23 |     "indentStyle": "space",
24 |     "lineWidth": 120
25 |   },
26 |   "javascript": {
27 |     "formatter": {
28 |       "semicolons": "asNeeded"
29 |     }
30 |   },
31 |   "json": {
32 |     "parser": {
33 |       "allowTrailingCommas": true,
34 |       "allowComments": true
35 |     }
36 |   },
37 |   "files": {
38 |     "ignore": ["./cloudflare-workers/**", "./node_modules/**", "./dist/**", "coverage/", "src/generated-types/"],
39 |     "ignoreUnknown": true
40 |   }
41 | }
42 | 
--------------------------------------------------------------------------------
/build.mjs:
--------------------------------------------------------------------------------
1 | import { rm } from "node:fs/promises"
2 | import * as esbuild from "esbuild"
3 | 
4 | await rm("./dist", { recursive: true, force: true })
5 | 
6 | for (const f of ["main_bun", "main_cloudflare-workers", "main_deno", "main_node"]) {
7 |   /**
8 |    * @type {esbuild.BuildOptions}
9 |    */
10 |   const config = {
11 |     entryPoints: [`./${f}.ts`],
12 |     outfile:
      `./dist/${f}.mjs`,
13 |     format: "esm",
14 |     minify: false,
15 |     bundle: true,
16 |     platform: "node",
17 |     packages: "bundle",
18 |     treeShaking: true,
19 |     legalComments: "none",
20 |     outExtension: { ".js": `.mjs` },
21 |     target: ["chrome100", "node18"],
22 |     external: ["node:*"],
23 |   }
24 |   await esbuild.build(config)
25 | }
26 | 
--------------------------------------------------------------------------------
/changelog.md:
--------------------------------------------------------------------------------
1 | # changelog
2 | 
3 | ## 0.15.0 (Unreleased)
4 | 
5 | - ...
6 | 
7 | ## 0.14.2
8 | 
9 | - Update model mapping
10 | 
11 | | Request Model        | Target Gemini Model        |
12 | | -------------------- | -------------------------- |
13 | | gpt-3.5-turbo        | gemini-1.5-flash-8b-latest |
14 | | gpt-4                | gemini-1.5-pro-latest      |
15 | | gpt-4o               | gemini-1.5-flash-latest    |
16 | | gpt-4o-mini          | gemini-1.5-flash-8b-latest |
17 | | gpt-4-vision-preview | gemini-1.5-flash-latest    |
18 | | gpt-4-turbo          | gemini-1.5-pro-latest      |
19 | | gpt-4-turbo-preview  | gemini-2.0-flash-exp       |
20 | | gemini*              | gemini*                    |
21 | | ...(others)          | gemini-1.5-flash-latest    |
22 | 
23 | ## 0.14.0 (2024-11-25)
24 | 
25 | - Add support for `gemini-*` model names. Now we can use
26 |   `gemini-1.5-flash-8b-exp-0924`, `gemini-exp-1114`, `gemini-1.5-flash-8b`, etc.
27 | - Add embedding endpoint support.
28 | 
29 | ```shell
30 | curl https://gemini-openai-proxy.deno.dev/v1/embeddings \
31 |   -H "Content-Type: application/json" \
32 |   -H "Authorization: Bearer $YOUR_GEMINI_API_KEY" \
33 |   -d '{
34 |     "input": "Your text string goes here",
35 |     "model": "text-embedding-3-small"
36 |   }'
37 | ```
38 | 
39 | - Fix bugs (#83)
40 | 
41 | ## 0.13.0 (2024-06-14)
42 | 
43 | - Support response format by @Quilljou in #51
44 | - Use the streamGenerateContent API to truly support streaming responses, by
45 |   @zuisong in #55
46 | 
47 | ## 0.12.0 (2024-05-16)
48 | 
49 | - Add support for `gemini-1.5-pro-vision-latest` and `gemini-1.5-flash-latest`
50 | 
51 | ## 0.11.0 (2024-04-11)
52 | 
53 | - Resolve CORS error: update the server configuration to enable cross-origin
54 |   access (see the sketch below).
55 | - Add support for function calls in non-stream mode.
56 | - Migrate to [itty-router](https://github.com/kwhitley/itty-router) to reduce
57 |   package size.
58 | - Utilize the official Deno Docker image for improved consistency.
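
A minimal sketch of how the CORS pieces fit together after the itty-router migration, mirroring the bundled `src/app.ts` (itty-router 5.x API; the `allowHeaders: "*"` value is the one this project's bundle uses):

```js
import { Router, cors } from "itty-router"

// cors() returns an OPTIONS preflight handler and a response decorator.
const { preflight, corsify } = cors({ allowHeaders: "*" })

const app = Router({
  before: [preflight], // answer CORS preflight requests before routing
  finally: [corsify],  // append CORS headers to every outgoing response
})
```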
59 | 
60 | ## 0.10.0 (2024-02-28)
61 | 
62 | - gemini-openai-proxy can now act as a reverse proxy for Google Gemini, which
63 |   can be useful for people in regions that don't have access to Google Gemini.
64 |   > it will request `https://generativelanguage.googleapis.com`
65 | 
66 | ```shell
67 | curl \
68 |   "http://localhost:8000/v1/models/gemini-pro:generateContent?key=$YOUR_GEMINI_API_KEY" \
69 |   --header 'Content-Type: application/json' \
70 |   --data '{"contents":[{"parts":[{"text":"Hello"}]}]}'
71 | ```
72 | 
73 | - Service settings can now be passed via the API key. Currently the first
74 |   supported setting is `useBeta`, which can be set like so:
75 |   > it will use the `v1beta` version of the Gemini API
76 |   >
77 | 
78 | ```shell
79 | curl http://localhost:8000/v1/chat/completions \
80 |   -H "Authorization: Bearer $YOUR_GEMINI_API_KEY#useBeta" \
81 |   -H "Content-Type: application/json" \
82 |   -d '{
83 |     "model": "gpt-3.5-turbo",
84 |     "messages": [{"role": "user", "content": "Hello"}],
85 |     "temperature": 0.7, "stream": true
86 |   }'
87 | ```
88 | 
89 | ## 0.9.0 (2024-02-02)
90 | 
91 | - Change the default safety-settings config #8
92 | - Fix failing to pull the Docker image due to a missing manifest #13
93 | 
94 | ## 0.6.0 (2024-01-20)
95 | 
96 | - Fix the Bun runtime stream-mode problem by adding a polyfill for `TextDecoderStream`
97 |   > Bun does not support `TextDecoderStream` yet
98 |   >
99 | 
100 | - Improve documentation
101 | 
102 | ## 0.5.0 (2024-01-19)
103 | 
104 | - Add Cloudflare Workers support
105 | - Reduce Docker image size
106 | - Improve documentation
107 | 
108 | ## 0.4.0 (2024-01-17)
109 | 
110 | - Refined the handling of consecutive user messages
111 | 
112 | ## 0.3.0 (2024-01-16)
113 | 
114 | - Add system message support
115 | 
116 | ## 0.2.0 (2024-01-10)
117 | 
118 | - Add streaming API support
119 | 
120 | ## 0.1.0 (2024-01-08)
121 | 
122 | - Support the gemini-pro API
123 | - Support the gemini-pro-vision API
124 | 
--------------------------------------------------------------------------------
/deno.jsonc:
--------------------------------------------------------------------------------
1 | {
2 |   "exclude": ["./dist", "./coverage"],
3 |   "lock": {
4 |     "frozen": false
5 |   },
6 |   "imports": {
7 |     "openapi-format": "https://esm.sh/openapi-format?bundle"
8 |   },
9 |   "nodeModulesDir": "auto",
10 |   "fmt": {
11 |     "exclude": ["*"]
12 |   },
13 |   "compilerOptions": {
14 |     "useUnknownInCatchVariables": false,
15 |     "lib": [
16 |       "ESNext.Array",
17 |       "DOM",
18 |       "DOM.AsyncIterable",
19 |       "DOM.Iterable",
20 |       "ESNext",
21 |       "ESNext.Disposable",
22 |       "ESNext.AsyncIterable",
23 |       "deno.ns"
24 |     ]
25 |   }
26 | }
27 | 
--------------------------------------------------------------------------------
/deno.lock:
--------------------------------------------------------------------------------
1 | {
2 |   "version": "4",
3 |   "specifiers": {
4 |     "jsr:@std/assert@*": "1.0.11",
5 |     "jsr:@std/assert@^1.0.10": "1.0.11",
6 |     "jsr:@std/assert@^1.0.11": "1.0.11",
7 |     "jsr:@std/expect@*": "1.0.13",
8 |     "jsr:@std/fs@*": "1.0.9",
9 |     "jsr:@std/internal@^1.0.5": "1.0.5",
10 |     "jsr:@std/path@^1.0.8": "1.0.8",
11 |     "jsr:@std/testing@*": "1.0.9",
12 |     "jsr:@std/yaml@*": "1.0.5",
13 |     "npm:@hono/node-server@1.14.0": "1.14.0_hono@4.7.5",
14 |     "npm:@total-typescript/ts-reset@0.6.1": "0.6.1",
15 |     "npm:esbuild@0.25.2": "0.25.2",
16 |     "npm:eventsource-parser@3.0.1": "3.0.1",
17 |     "npm:itty-router@5.0.18": "5.0.18"
18 |   },
19 |   "jsr": {
20 |     "@std/assert@1.0.11": {
21 |       "integrity": "2461ef3c368fe88bc60e186e7744a93112f16fd110022e113a0849e94d1c83c1",
22 |       "dependencies": [
23 |         "jsr:@std/internal"
24 |       ]
25 | 
}, 26 | "@std/expect@1.0.13": { 27 | "integrity": "d8e236c7089cd9fcf5e6032f27dadc3db6349d0aee48c15bc71d717bca5baa42", 28 | "dependencies": [ 29 | "jsr:@std/assert@^1.0.11", 30 | "jsr:@std/internal" 31 | ] 32 | }, 33 | "@std/fs@1.0.9": { 34 | "integrity": "3eef7e3ed3d317b29432c7dcb3b20122820dbc574263f721cb0248ad91bad890", 35 | "dependencies": [ 36 | "jsr:@std/path" 37 | ] 38 | }, 39 | "@std/internal@1.0.5": { 40 | "integrity": "54a546004f769c1ac9e025abd15a76b6671ddc9687e2313b67376125650dc7ba" 41 | }, 42 | "@std/path@1.0.8": { 43 | "integrity": "548fa456bb6a04d3c1a1e7477986b6cffbce95102d0bb447c67c4ee70e0364be" 44 | }, 45 | "@std/testing@1.0.9": { 46 | "integrity": "9bdd4ac07cb13e7594ac30e90f6ceef7254ac83a9aeaa089be0008f33aab5cd4", 47 | "dependencies": [ 48 | "jsr:@std/assert@^1.0.10", 49 | "jsr:@std/internal" 50 | ] 51 | }, 52 | "@std/yaml@1.0.5": { 53 | "integrity": "71ba3d334305ee2149391931508b2c293a8490f94a337eef3a09cade1a2a2742" 54 | } 55 | }, 56 | "npm": { 57 | "@esbuild/aix-ppc64@0.25.2": { 58 | "integrity": "sha512-wCIboOL2yXZym2cgm6mlA742s9QeJ8DjGVaL39dLN4rRwrOgOyYSnOaFPhKZGLb2ngj4EyfAFjsNJwPXZvseag==" 59 | }, 60 | "@esbuild/android-arm64@0.25.2": { 61 | "integrity": "sha512-5ZAX5xOmTligeBaeNEPnPaeEuah53Id2tX4c2CVP3JaROTH+j4fnfHCkr1PjXMd78hMst+TlkfKcW/DlTq0i4w==" 62 | }, 63 | "@esbuild/android-arm@0.25.2": { 64 | "integrity": "sha512-NQhH7jFstVY5x8CKbcfa166GoV0EFkaPkCKBQkdPJFvo5u+nGXLEH/ooniLb3QI8Fk58YAx7nsPLozUWfCBOJA==" 65 | }, 66 | "@esbuild/android-x64@0.25.2": { 67 | "integrity": "sha512-Ffcx+nnma8Sge4jzddPHCZVRvIfQ0kMsUsCMcJRHkGJ1cDmhe4SsrYIjLUKn1xpHZybmOqCWwB0zQvsjdEHtkg==" 68 | }, 69 | "@esbuild/darwin-arm64@0.25.2": { 70 | "integrity": "sha512-MpM6LUVTXAzOvN4KbjzU/q5smzryuoNjlriAIx+06RpecwCkL9JpenNzpKd2YMzLJFOdPqBpuub6eVRP5IgiSA==" 71 | }, 72 | "@esbuild/darwin-x64@0.25.2": { 73 | "integrity": "sha512-5eRPrTX7wFyuWe8FqEFPG2cU0+butQQVNcT4sVipqjLYQjjh8a8+vUTfgBKM88ObB85ahsnTwF7PSIt6PG+QkA==" 74 | }, 75 | "@esbuild/freebsd-arm64@0.25.2": { 76 | "integrity": "sha512-mLwm4vXKiQ2UTSX4+ImyiPdiHjiZhIaE9QvC7sw0tZ6HoNMjYAqQpGyui5VRIi5sGd+uWq940gdCbY3VLvsO1w==" 77 | }, 78 | "@esbuild/freebsd-x64@0.25.2": { 79 | "integrity": "sha512-6qyyn6TjayJSwGpm8J9QYYGQcRgc90nmfdUb0O7pp1s4lTY+9D0H9O02v5JqGApUyiHOtkz6+1hZNvNtEhbwRQ==" 80 | }, 81 | "@esbuild/linux-arm64@0.25.2": { 82 | "integrity": "sha512-gq/sjLsOyMT19I8obBISvhoYiZIAaGF8JpeXu1u8yPv8BE5HlWYobmlsfijFIZ9hIVGYkbdFhEqC0NvM4kNO0g==" 83 | }, 84 | "@esbuild/linux-arm@0.25.2": { 85 | "integrity": "sha512-UHBRgJcmjJv5oeQF8EpTRZs/1knq6loLxTsjc3nxO9eXAPDLcWW55flrMVc97qFPbmZP31ta1AZVUKQzKTzb0g==" 86 | }, 87 | "@esbuild/linux-ia32@0.25.2": { 88 | "integrity": "sha512-bBYCv9obgW2cBP+2ZWfjYTU+f5cxRoGGQ5SeDbYdFCAZpYWrfjjfYwvUpP8MlKbP0nwZ5gyOU/0aUzZ5HWPuvQ==" 89 | }, 90 | "@esbuild/linux-loong64@0.25.2": { 91 | "integrity": "sha512-SHNGiKtvnU2dBlM5D8CXRFdd+6etgZ9dXfaPCeJtz+37PIUlixvlIhI23L5khKXs3DIzAn9V8v+qb1TRKrgT5w==" 92 | }, 93 | "@esbuild/linux-mips64el@0.25.2": { 94 | "integrity": "sha512-hDDRlzE6rPeoj+5fsADqdUZl1OzqDYow4TB4Y/3PlKBD0ph1e6uPHzIQcv2Z65u2K0kpeByIyAjCmjn1hJgG0Q==" 95 | }, 96 | "@esbuild/linux-ppc64@0.25.2": { 97 | "integrity": "sha512-tsHu2RRSWzipmUi9UBDEzc0nLc4HtpZEI5Ba+Omms5456x5WaNuiG3u7xh5AO6sipnJ9r4cRWQB2tUjPyIkc6g==" 98 | }, 99 | "@esbuild/linux-riscv64@0.25.2": { 100 | "integrity": "sha512-k4LtpgV7NJQOml/10uPU0s4SAXGnowi5qBSjaLWMojNCUICNu7TshqHLAEbkBdAszL5TabfvQ48kK84hyFzjnw==" 101 | }, 102 | "@esbuild/linux-s390x@0.25.2": { 103 | "integrity": 
"sha512-GRa4IshOdvKY7M/rDpRR3gkiTNp34M0eLTaC1a08gNrh4u488aPhuZOCpkF6+2wl3zAN7L7XIpOFBhnaE3/Q8Q==" 104 | }, 105 | "@esbuild/linux-x64@0.25.2": { 106 | "integrity": "sha512-QInHERlqpTTZ4FRB0fROQWXcYRD64lAoiegezDunLpalZMjcUcld3YzZmVJ2H/Cp0wJRZ8Xtjtj0cEHhYc/uUg==" 107 | }, 108 | "@esbuild/netbsd-arm64@0.25.2": { 109 | "integrity": "sha512-talAIBoY5M8vHc6EeI2WW9d/CkiO9MQJ0IOWX8hrLhxGbro/vBXJvaQXefW2cP0z0nQVTdQ/eNyGFV1GSKrxfw==" 110 | }, 111 | "@esbuild/netbsd-x64@0.25.2": { 112 | "integrity": "sha512-voZT9Z+tpOxrvfKFyfDYPc4DO4rk06qamv1a/fkuzHpiVBMOhpjK+vBmWM8J1eiB3OLSMFYNaOaBNLXGChf5tg==" 113 | }, 114 | "@esbuild/openbsd-arm64@0.25.2": { 115 | "integrity": "sha512-dcXYOC6NXOqcykeDlwId9kB6OkPUxOEqU+rkrYVqJbK2hagWOMrsTGsMr8+rW02M+d5Op5NNlgMmjzecaRf7Tg==" 116 | }, 117 | "@esbuild/openbsd-x64@0.25.2": { 118 | "integrity": "sha512-t/TkWwahkH0Tsgoq1Ju7QfgGhArkGLkF1uYz8nQS/PPFlXbP5YgRpqQR3ARRiC2iXoLTWFxc6DJMSK10dVXluw==" 119 | }, 120 | "@esbuild/sunos-x64@0.25.2": { 121 | "integrity": "sha512-cfZH1co2+imVdWCjd+D1gf9NjkchVhhdpgb1q5y6Hcv9TP6Zi9ZG/beI3ig8TvwT9lH9dlxLq5MQBBgwuj4xvA==" 122 | }, 123 | "@esbuild/win32-arm64@0.25.2": { 124 | "integrity": "sha512-7Loyjh+D/Nx/sOTzV8vfbB3GJuHdOQyrOryFdZvPHLf42Tk9ivBU5Aedi7iyX+x6rbn2Mh68T4qq1SDqJBQO5Q==" 125 | }, 126 | "@esbuild/win32-ia32@0.25.2": { 127 | "integrity": "sha512-WRJgsz9un0nqZJ4MfhabxaD9Ft8KioqU3JMinOTvobbX6MOSUigSBlogP8QB3uxpJDsFS6yN+3FDBdqE5lg9kg==" 128 | }, 129 | "@esbuild/win32-x64@0.25.2": { 130 | "integrity": "sha512-kM3HKb16VIXZyIeVrM1ygYmZBKybX8N4p754bw390wGO3Tf2j4L2/WYL+4suWujpgf6GBYs3jv7TyUivdd05JA==" 131 | }, 132 | "@hono/node-server@1.14.0_hono@4.7.5": { 133 | "integrity": "sha512-YUCxJwgHRKSqjrdTk9e4VMGKN27MK5r4+MGPyZTgKH+IYbK+KtYbHeOcPGJ91KGGD6RIQiz2dAHxvjauNhOS8g==", 134 | "dependencies": [ 135 | "hono" 136 | ] 137 | }, 138 | "@total-typescript/ts-reset@0.6.1": { 139 | "integrity": "sha512-cka47fVSo6lfQDIATYqb/vO1nvFfbPw7uWLayIXIhGETj0wcOOlrlkobOMDNQOFr9QOafegUPq13V2+6vtD7yg==" 140 | }, 141 | "esbuild@0.25.2": { 142 | "integrity": "sha512-16854zccKPnC+toMywC+uKNeYSv+/eXkevRAfwRD/G9Cleq66m8XFIrigkbvauLLlCfDL45Q2cWegSg53gGBnQ==", 143 | "dependencies": [ 144 | "@esbuild/aix-ppc64", 145 | "@esbuild/android-arm", 146 | "@esbuild/android-arm64", 147 | "@esbuild/android-x64", 148 | "@esbuild/darwin-arm64", 149 | "@esbuild/darwin-x64", 150 | "@esbuild/freebsd-arm64", 151 | "@esbuild/freebsd-x64", 152 | "@esbuild/linux-arm", 153 | "@esbuild/linux-arm64", 154 | "@esbuild/linux-ia32", 155 | "@esbuild/linux-loong64", 156 | "@esbuild/linux-mips64el", 157 | "@esbuild/linux-ppc64", 158 | "@esbuild/linux-riscv64", 159 | "@esbuild/linux-s390x", 160 | "@esbuild/linux-x64", 161 | "@esbuild/netbsd-arm64", 162 | "@esbuild/netbsd-x64", 163 | "@esbuild/openbsd-arm64", 164 | "@esbuild/openbsd-x64", 165 | "@esbuild/sunos-x64", 166 | "@esbuild/win32-arm64", 167 | "@esbuild/win32-ia32", 168 | "@esbuild/win32-x64" 169 | ] 170 | }, 171 | "eventsource-parser@3.0.1": { 172 | "integrity": "sha512-VARTJ9CYeuQYb0pZEPbzi740OWFgpHe7AYJ2WFZVnUDUQp5Dk2yJUgF36YsZ81cOyxT0QxmXD2EQpapAouzWVA==" 173 | }, 174 | "hono@4.7.5": { 175 | "integrity": "sha512-fDOK5W2C1vZACsgLONigdZTRZxuBqFtcKh7bUQ5cVSbwI2RWjloJDcgFOVzbQrlI6pCmhlTsVYZ7zpLj4m4qMQ==" 176 | }, 177 | "itty-router@5.0.18": { 178 | "integrity": "sha512-mK3ReOt4ARAGy0V0J7uHmArG2USN2x0zprZ+u+YgmeRjXTDbaowDy3kPcsmQY6tH+uHhDgpWit9Vqmv/4rTXwA==" 179 | } 180 | }, 181 | "redirects": { 182 | "https://esm.sh/buffer-from@^1.0.0?target=denonext": "https://esm.sh/buffer-from@1.1.2?target=denonext", 183 | 
"https://esm.sh/openapi-format?bundle": "https://esm.sh/openapi-format@1.25.0?bundle", 184 | "https://esm.sh/source-map-support?target=denonext": "https://esm.sh/source-map-support@0.5.21?target=denonext", 185 | "https://esm.sh/source-map@^0.6.0?target=denonext": "https://esm.sh/source-map@0.6.1?target=denonext", 186 | "https://esm.sh/typescript@^5.x?target=denonext": "https://esm.sh/typescript@5.7.3?target=denonext" 187 | }, 188 | "remote": { 189 | "https://esm.sh/buffer-from@1.1.2/denonext/buffer-from.mjs": "dafdfaf9205bdc33bcde4f4a22014fa78b325c07e25d3f0316c8a609076ec2ad", 190 | "https://esm.sh/buffer-from@1.1.2?target=denonext": "0333d3eb82fde733d4b26ae6558a3b7fca98fd7192fc2ffb1c0714522199e02e", 191 | "https://esm.sh/openapi-format@1.25.0/denonext/openapi-format.bundle.mjs": "15addc0b5a4142eb4197dc177f8d294ce7680de34940e38996ac4e5b7cb6a70e", 192 | "https://esm.sh/openapi-format@1.25.0?bundle": "434701331f33915c59194ac5ff5645f16e72cdb53bb10d93535d10914eae6f6b", 193 | "https://esm.sh/openapi-typescript@7.6.1/denonext/openapi-typescript.bundle.mjs": "9d8b10f9b72d18e5c7d394fbe09eeb695288996112fe2476cfc42b6e0775fb7c", 194 | "https://esm.sh/openapi-typescript@7.6.1?bundle": "2cc367e53f4d3a4928f2bda9df8d11a440430f7359714eb1f28a344024ae570c", 195 | "https://esm.sh/source-map-support@0.5.21/denonext/source-map-support.mjs": "07b16eadac43d9ab36c92c0860425b2e07844575fe7c3a19fa0e5c5118d38951", 196 | "https://esm.sh/source-map-support@0.5.21?target=denonext": "abc0f37c6b4b5704ca31e8f1d1679618692911fab29f65a472ae650db67c9307", 197 | "https://esm.sh/source-map@0.6.1/denonext/source-map.mjs": "ed3f2cf0c5f9561924c9b923ea8ffc24709ae31f1a90dd6c67be04c99598f626", 198 | "https://esm.sh/source-map@0.6.1?target=denonext": "3803db0ea481a551d1b0f98b57c3b32597fe2c03a5204fbec3aa58d1892f5fa0", 199 | "https://esm.sh/typescript@5.7.3/denonext/typescript.mjs": "fe6bea3d3c68c97024df7645fb35da2d13d161a0d4eaeeda52509976beb954bd", 200 | "https://esm.sh/typescript@5.7.3?target=denonext": "8d478dd0cfcb7bed6ddcf265f51f6f787c3fec634ba6567bba19f5bd1137b557" 201 | }, 202 | "workspace": { 203 | "packageJson": { 204 | "dependencies": [ 205 | "npm:@hono/node-server@1.14.0", 206 | "npm:@total-typescript/ts-reset@0.6.1", 207 | "npm:esbuild@0.25.2", 208 | "npm:eventsource-parser@3.0.1", 209 | "npm:itty-router@5.0.18" 210 | ] 211 | } 212 | } 213 | } 214 | -------------------------------------------------------------------------------- /dist/main_bun.mjs: -------------------------------------------------------------------------------- 1 | // node_modules/.deno/itty-router@5.0.18/node_modules/itty-router/Router.mjs 2 | var r = ({ base: r2 = "", routes: e2 = [], ...a } = {}) => ({ __proto__: new Proxy({}, { get: (a2, t, o, c) => (a3, ...l) => e2.push([t.toUpperCase?.(), RegExp(`^${(c = (r2 + a3).replace(/\/+(\/|$)/g, "$1")).replace(/(\/?\.?):(\w+)\+/g, "($1(?<$2>*))").replace(/(\/?\.?):(\w+)/g, "($1(?<$2>[^$1/]+?))").replace(/\./g, "\\.").replace(/(\/?)\*/g, "($1.*)?")}/*$`), l, c]) && o }), routes: e2, ...a, async fetch(r3, ...t) { 3 | let o, c, l = new URL(r3.url), p = r3.query = { __proto__: null }; 4 | for (let [r4, e3] of l.searchParams) p[r4] = p[r4] ? [].concat(p[r4], e3) : e3; 5 | r: try { 6 | for (let e3 of a.before || []) if (null != (o = await e3(r3.proxy ?? r3, ...t))) break r; 7 | e: for (let [a2, p2, f, h] of e2) if ((a2 == r3.method || "ALL" == a2) && (c = l.pathname.match(p2))) { 8 | r3.params = c.groups || {}, r3.route = h; 9 | for (let e3 of f) if (null != (o = await e3(r3.proxy ?? 
r3, ...t))) break e; 10 | } 11 | } catch (e3) { 12 | if (!a.catch) throw e3; 13 | o = await a.catch(e3, r3.proxy ?? r3, ...t); 14 | } 15 | try { 16 | for (let e3 of a.finally || []) o = await e3(o, r3.proxy ?? r3, ...t) ?? o; 17 | } catch (e3) { 18 | if (!a.catch) throw e3; 19 | o = await a.catch(e3, r3.proxy ?? r3, ...t); 20 | } 21 | return o; 22 | } }); 23 | 24 | // node_modules/.deno/itty-router@5.0.18/node_modules/itty-router/cors.mjs 25 | var e = (e2 = {}) => { 26 | const { origin: o = "*", credentials: s = false, allowMethods: c = "*", allowHeaders: r2, exposeHeaders: n, maxAge: t } = e2, a = (e3) => { 27 | const c2 = e3?.headers.get("origin"); 28 | return true === o ? c2 : o instanceof RegExp ? o.test(c2) ? c2 : void 0 : Array.isArray(o) ? o.includes(c2) ? c2 : void 0 : o instanceof Function ? o(c2) : "*" == o && s ? c2 : o; 29 | }, l = (e3, o2) => { 30 | for (const [s2, c2] of Object.entries(o2)) c2 && e3.headers.append(s2, c2); 31 | return e3; 32 | }; 33 | return { corsify: (e3, o2) => e3?.headers?.get("access-control-allow-origin") || 101 == e3.status ? e3 : l(e3.clone(), { "access-control-allow-origin": a(o2), "access-control-allow-credentials": s }), preflight: (e3) => { 34 | if ("OPTIONS" == e3.method) { 35 | const o2 = new Response(null, { status: 204 }); 36 | return l(o2, { "access-control-allow-origin": a(e3), "access-control-allow-methods": c?.join?.(",") ?? c, "access-control-expose-headers": n?.join?.(",") ?? n, "access-control-allow-headers": r2?.join?.(",") ?? r2 ?? e3.headers.get("access-control-request-headers"), "access-control-max-age": t, "access-control-allow-credentials": s }); 37 | } 38 | } }; 39 | }; 40 | 41 | // src/gemini-proxy.ts 42 | async function geminiProxy(rawReq) { 43 | const url = new URL(rawReq.url); 44 | url.host = "generativelanguage.googleapis.com"; 45 | url.port = ""; 46 | url.protocol = "https:"; 47 | const req = new Request(url, rawReq); 48 | const resp = await fetch(req); 49 | return new Response(resp.body, resp); 50 | } 51 | 52 | // src/utils.ts 53 | function getToken(headers) { 54 | for (const [k, v] of headers) { 55 | if (k.toLowerCase() !== "authorization") continue; 56 | const rawApikey = v.substring(v.indexOf(" ") + 1); 57 | if (!rawApikey.includes("#")) { 58 | return { 59 | apikey: rawApikey, 60 | useBeta: false 61 | }; 62 | } 63 | const apikey = rawApikey.substring(0, rawApikey.indexOf("#")); 64 | const params = new URLSearchParams(rawApikey.substring(rawApikey.indexOf("#") + 1)); 65 | return { 66 | apikey, 67 | useBeta: params.has("useBeta") 68 | }; 69 | } 70 | return null; 71 | } 72 | function parseBase64(base64) { 73 | if (!base64.startsWith("data:")) { 74 | return { text: "" }; 75 | } 76 | const [m, data, ..._arr] = base64.split(","); 77 | const mimeType = m.match(/:(?.*?);/)?.groups?.mime ?? "img/png"; 78 | return { 79 | inlineData: { 80 | mimeType, 81 | data 82 | } 83 | }; 84 | } 85 | function openAiMessageToGeminiMessage(messages) { 86 | const result = messages.flatMap(({ role, content }) => { 87 | if (role === "system") { 88 | return [ 89 | { 90 | role: "user", 91 | parts: typeof content !== "string" ? content : [{ text: content }] 92 | } 93 | ]; 94 | } 95 | const parts = content == null || typeof content === "string" ? [{ text: content?.toString() ?? "" }] : content.map((item) => { 96 | if (item.type === "text") return { text: item.text }; 97 | if (item.type === "image_url") return parseBase64(item.image_url.url); 98 | return { text: "OK" }; 99 | }); 100 | return [{ role: "user" === role ? 
"user" : "model", parts }]; 101 | }); 102 | return result; 103 | } 104 | function genModel(req) { 105 | const model = GeminiModel.modelMapping(req.model); 106 | let functions = req.tools?.filter((it) => it.type === "function")?.map((it) => it.function) ?? []; 107 | functions = functions.concat((req.functions ?? []).map((it) => ({ strict: null, ...it }))); 108 | const [responseMimeType, responseSchema] = (() => { 109 | switch (req.response_format?.type) { 110 | case "json_object": 111 | return ["application/json", void 0]; 112 | case "json_schema": 113 | return ["application/json", req.response_format.json_schema.schema]; 114 | case "text": 115 | return ["text/plain", void 0]; 116 | default: 117 | return [void 0, void 0]; 118 | } 119 | })(); 120 | const generateContentRequest = { 121 | contents: openAiMessageToGeminiMessage(req.messages), 122 | generationConfig: { 123 | maxOutputTokens: req.max_completion_tokens ?? void 0, 124 | temperature: req.temperature ?? void 0, 125 | topP: req.top_p ?? void 0, 126 | responseMimeType, 127 | responseSchema, 128 | thinkingConfig: !model.isThinkingModel() ? void 0 : { 129 | includeThoughts: true 130 | } 131 | }, 132 | tools: functions.length === 0 ? void 0 : [ 133 | { 134 | functionDeclarations: functions 135 | } 136 | ], 137 | safetySettings: [ 138 | "HARM_CATEGORY_HATE_SPEECH", 139 | "HARM_CATEGORY_SEXUALLY_EXPLICIT", 140 | "HARM_CATEGORY_DANGEROUS_CONTENT", 141 | "HARM_CATEGORY_HARASSMENT" 142 | ].map((category) => ({ 143 | category, 144 | threshold: "BLOCK_NONE" 145 | })) 146 | }; 147 | return [model, generateContentRequest]; 148 | } 149 | var GeminiModel = class _GeminiModel { 150 | static modelMapping(model) { 151 | const modelName = ModelMapping[model ?? ""] ?? _GeminiModel.defaultModel(model ?? ""); 152 | return new _GeminiModel(modelName); 153 | } 154 | model; 155 | constructor(model) { 156 | this.model = model; 157 | } 158 | isThinkingModel() { 159 | return this.model.includes("thinking"); 160 | } 161 | apiVersion() { 162 | if (this.isThinkingModel()) { 163 | return "v1alpha"; 164 | } 165 | return "v1beta"; 166 | } 167 | toString() { 168 | return this.model; 169 | } 170 | static defaultModel(m) { 171 | if (m.startsWith("gemini")) { 172 | return m; 173 | } 174 | return "gemini-1.5-flash-latest"; 175 | } 176 | }; 177 | var ModelMapping = { 178 | "gpt-3.5-turbo": "gemini-1.5-flash-8b-latest", 179 | "gpt-4": "gemini-1.5-pro-latest", 180 | "gpt-4o": "gemini-1.5-flash-latest", 181 | "gpt-4o-mini": "gemini-1.5-flash-8b-latest", 182 | "gpt-4-vision-preview": "gemini-1.5-flash-latest", 183 | "gpt-4-turbo": "gemini-1.5-pro-latest", 184 | "gpt-4-turbo-preview": "gemini-2.0-flash-exp" 185 | }; 186 | function getRuntimeKey() { 187 | const global = globalThis; 188 | if (global?.Deno !== void 0) { 189 | return "deno"; 190 | } 191 | if (global?.Bun !== void 0) { 192 | return "bun"; 193 | } 194 | if (typeof global?.WebSocketPair === "function") { 195 | return "workerd"; 196 | } 197 | if (typeof global?.EdgeRuntime === "string") { 198 | return "edge-light"; 199 | } 200 | if (global?.fastly !== void 0) { 201 | return "fastly"; 202 | } 203 | if (global?.process?.release?.name === "node") { 204 | return "node"; 205 | } 206 | return "other"; 207 | } 208 | 209 | // src/hello.ts 210 | function hello(req) { 211 | const origin = new URL(req.url).origin; 212 | return new Response(` 213 | Hello Gemini-OpenAI-Proxy from ${getRuntimeKey()}! 
214 | 215 | You can try it with: 216 | 217 | curl ${origin}/v1/chat/completions \\ 218 | -H "Authorization: Bearer $YOUR_GEMINI_API_KEY" \\ 219 | -H "Content-Type: application/json" \\ 220 | -d '{ 221 | "model": "gpt-3.5-turbo", 222 | "messages": [{"role": "user", "content": "Hello"}], 223 | "temperature": 0.7 224 | }' 225 | `); 226 | } 227 | 228 | // src/log.ts 229 | var LEVEL = ["debug", "info", "warn", "error"]; 230 | var Logger = class { 231 | config; 232 | debug; 233 | info; 234 | warn; 235 | error; 236 | constructor(prefix, logLevel) { 237 | const level = LEVEL.find((it) => it === logLevel) ?? "warn"; 238 | this.config = { 239 | prefix: prefix ?? "", 240 | level 241 | }; 242 | for (const m of LEVEL) { 243 | this[m] = (...data) => this.#write(m, ...data); 244 | } 245 | } 246 | #write(level, ...data) { 247 | const { level: configLevel, prefix } = this.config; 248 | if (LEVEL.indexOf(level) < LEVEL.indexOf(configLevel)) { 249 | return; 250 | } 251 | console[level](`${(/* @__PURE__ */ new Date()).toISOString()} ${level.toUpperCase()}${prefix ? ` ${prefix}` : ""}`, ...data); 252 | } 253 | }; 254 | 255 | // node_modules/.deno/eventsource-parser@3.0.1/node_modules/eventsource-parser/dist/index.js 256 | var ParseError = class extends Error { 257 | constructor(message, options) { 258 | super(message), this.name = "ParseError", this.type = options.type, this.field = options.field, this.value = options.value, this.line = options.line; 259 | } 260 | }; 261 | function noop(_arg) { 262 | } 263 | function createParser(callbacks) { 264 | if (typeof callbacks == "function") 265 | throw new TypeError( 266 | "`callbacks` must be an object, got a function instead. Did you mean `{onEvent: fn}`?" 267 | ); 268 | const { onEvent = noop, onError = noop, onRetry = noop, onComment } = callbacks; 269 | let incompleteLine = "", isFirstChunk = true, id, data = "", eventType = ""; 270 | function feed(newChunk) { 271 | const chunk = isFirstChunk ? newChunk.replace(/^\xEF\xBB\xBF/, "") : newChunk, [complete, incomplete] = splitLines(`${incompleteLine}${chunk}`); 272 | for (const line of complete) 273 | parseLine(line); 274 | incompleteLine = incomplete, isFirstChunk = false; 275 | } 276 | function parseLine(line) { 277 | if (line === "") { 278 | dispatchEvent(); 279 | return; 280 | } 281 | if (line.startsWith(":")) { 282 | onComment && onComment(line.slice(line.startsWith(": ") ? 2 : 1)); 283 | return; 284 | } 285 | const fieldSeparatorIndex = line.indexOf(":"); 286 | if (fieldSeparatorIndex !== -1) { 287 | const field = line.slice(0, fieldSeparatorIndex), offset = line[fieldSeparatorIndex + 1] === " " ? 2 : 1, value = line.slice(fieldSeparatorIndex + offset); 288 | processField(field, value, line); 289 | return; 290 | } 291 | processField(line, "", line); 292 | } 293 | function processField(field, value, line) { 294 | switch (field) { 295 | case "event": 296 | eventType = value; 297 | break; 298 | case "data": 299 | data = `${data}${value} 300 | `; 301 | break; 302 | case "id": 303 | id = value.includes("\0") ? void 0 : value; 304 | break; 305 | case "retry": 306 | /^\d+$/.test(value) ? onRetry(parseInt(value, 10)) : onError( 307 | new ParseError(`Invalid \`retry\` value: "${value}"`, { 308 | type: "invalid-retry", 309 | value, 310 | line 311 | }) 312 | ); 313 | break; 314 | default: 315 | onError( 316 | new ParseError( 317 | `Unknown field "${field.length > 20 ? 
`${field.slice(0, 20)}\u2026` : field}"`, 318 | { type: "unknown-field", field, value, line } 319 | ) 320 | ); 321 | break; 322 | } 323 | } 324 | function dispatchEvent() { 325 | data.length > 0 && onEvent({ 326 | id, 327 | event: eventType || void 0, 328 | // If the data buffer's last character is a U+000A LINE FEED (LF) character, 329 | // then remove the last character from the data buffer. 330 | data: data.endsWith(` 331 | `) ? data.slice(0, -1) : data 332 | }), id = void 0, data = "", eventType = ""; 333 | } 334 | function reset(options = {}) { 335 | incompleteLine && options.consume && parseLine(incompleteLine), isFirstChunk = true, id = void 0, data = "", eventType = "", incompleteLine = ""; 336 | } 337 | return { feed, reset }; 338 | } 339 | function splitLines(chunk) { 340 | const lines = []; 341 | let incompleteLine = "", searchIndex = 0; 342 | for (; searchIndex < chunk.length; ) { 343 | const crIndex = chunk.indexOf("\r", searchIndex), lfIndex = chunk.indexOf(` 344 | `, searchIndex); 345 | let lineEnd = -1; 346 | if (crIndex !== -1 && lfIndex !== -1 ? lineEnd = Math.min(crIndex, lfIndex) : crIndex !== -1 ? lineEnd = crIndex : lfIndex !== -1 && (lineEnd = lfIndex), lineEnd === -1) { 347 | incompleteLine = chunk.slice(searchIndex); 348 | break; 349 | } else { 350 | const line = chunk.slice(searchIndex, lineEnd); 351 | lines.push(line), searchIndex = lineEnd + 1, chunk[searchIndex - 1] === "\r" && chunk[searchIndex] === ` 352 | ` && searchIndex++; 353 | } 354 | } 355 | return [lines, incompleteLine]; 356 | } 357 | 358 | // node_modules/.deno/eventsource-parser@3.0.1/node_modules/eventsource-parser/dist/stream.js 359 | var EventSourceParserStream = class extends TransformStream { 360 | constructor({ onError, onRetry, onComment } = {}) { 361 | let parser; 362 | super({ 363 | start(controller) { 364 | parser = createParser({ 365 | onEvent: (event) => { 366 | controller.enqueue(event); 367 | }, 368 | onError(error) { 369 | onError === "terminate" ? 
controller.error(error) : typeof onError == "function" && onError(error); 370 | }, 371 | onRetry, 372 | onComment 373 | }); 374 | }, 375 | transform(chunk) { 376 | parser.feed(chunk); 377 | } 378 | }); 379 | } 380 | }; 381 | 382 | // src/gemini-api-client/errors.ts 383 | var GoogleGenerativeAIError = class extends Error { 384 | constructor(message) { 385 | super(`[GoogleGenerativeAI Error]: ${message}`); 386 | } 387 | }; 388 | var GoogleGenerativeAIResponseError = class extends GoogleGenerativeAIError { 389 | response; 390 | constructor(message, response) { 391 | super(message); 392 | this.response = response; 393 | } 394 | }; 395 | 396 | // src/gemini-api-client/gemini-api-client.ts 397 | async function* streamGenerateContent(apiParam, model, params, requestOptions) { 398 | const response = await makeRequest( 399 | toURL({ model, task: "streamGenerateContent", stream: true, apiParam }), 400 | JSON.stringify(params), 401 | requestOptions 402 | ); 403 | const body = response.body; 404 | if (body == null) { 405 | return; 406 | } 407 | for await (const event of body.pipeThrough(new TextDecoderStream()).pipeThrough(new EventSourceParserStream())) { 408 | const responseJson = JSON.parse(event.data); 409 | yield responseJson; 410 | } 411 | } 412 | async function embedContent(apiParam, model, params, requestOptions) { 413 | const response = await makeRequest( 414 | toURL({ model, task: "embedContent", stream: false, apiParam }), 415 | JSON.stringify(params), 416 | requestOptions 417 | ); 418 | const body = response.body; 419 | if (body == null) { 420 | return; 421 | } 422 | const responseJson = await response.json(); 423 | return responseJson; 424 | } 425 | async function makeRequest(url, body, requestOptions) { 426 | let response; 427 | try { 428 | response = await fetch(url, { 429 | ...buildFetchOptions(requestOptions), 430 | method: "POST", 431 | headers: { 432 | "Content-Type": "application/json" 433 | }, 434 | body 435 | }); 436 | if (!response.ok) { 437 | let message = ""; 438 | try { 439 | const errResp = await response.json(); 440 | message = errResp.error?.message; 441 | if (errResp?.error?.details) { 442 | message += ` ${JSON.stringify(errResp.error.details)}`; 443 | } 444 | } catch (_e) { 445 | } 446 | throw new Error(`[${response.status} ${response.statusText}] ${message}`); 447 | } 448 | } catch (e2) { 449 | console.log(e2); 450 | const err = new GoogleGenerativeAIError(`Error fetching from google -> ${e2.message}`); 451 | err.stack = e2.stack; 452 | throw err; 453 | } 454 | return response; 455 | } 456 | function toURL({ 457 | model, 458 | task, 459 | stream, 460 | apiParam 461 | }) { 462 | const BASE_URL = "https://generativelanguage.googleapis.com"; 463 | const api_version = model.apiVersion(); 464 | const url = new URL(`${BASE_URL}/${api_version}/models/${model}:${task}`); 465 | url.searchParams.append("key", apiParam.apikey); 466 | if (stream) { 467 | url.searchParams.append("alt", "sse"); 468 | } 469 | return url; 470 | } 471 | function buildFetchOptions(requestOptions) { 472 | const fetchOptions = {}; 473 | if (requestOptions?.timeout) { 474 | const abortController = new AbortController(); 475 | const signal = abortController.signal; 476 | setTimeout(() => abortController.abort(), requestOptions.timeout); 477 | fetchOptions.signal = signal; 478 | } 479 | return fetchOptions; 480 | } 481 | 482 | // src/gemini-api-client/response-helper.ts 483 | function resultHelper(response) { 484 | if (response.candidates && response.candidates.length > 0) { 485 | if 
(response.candidates.length > 1) { 486 | console.warn( 487 | `This response had ${response.candidates.length} candidates. Returning text from the first candidate only. Access response.candidates directly to use the other candidates.` 488 | ); 489 | } 490 | if (hadBadFinishReason(response.candidates[0])) { 491 | throw new GoogleGenerativeAIResponseError( 492 | `${formatBlockErrorMessage(response)}`, 493 | response 494 | ); 495 | } 496 | return getText(response); 497 | } 498 | if (response.promptFeedback) { 499 | throw new GoogleGenerativeAIResponseError( 500 | `Text not available. ${formatBlockErrorMessage(response)}`, 501 | response 502 | ); 503 | } 504 | return ""; 505 | } 506 | function getText(response) { 507 | if (response.candidates?.[0].content?.parts?.[0]?.text) { 508 | return response.candidates[0].content.parts[0].text; 509 | } 510 | if (response.candidates?.[0].content?.parts?.[0]?.functionCall) { 511 | return response.candidates[0].content.parts[0].functionCall; 512 | } 513 | return ""; 514 | } 515 | var badFinishReasons = ["RECITATION", "SAFETY"]; 516 | function hadBadFinishReason(candidate) { 517 | return !!candidate.finishReason && badFinishReasons.includes(candidate.finishReason); 518 | } 519 | function formatBlockErrorMessage(response) { 520 | let message = ""; 521 | if ((!response.candidates || response.candidates.length === 0) && response.promptFeedback) { 522 | message += "Response was blocked"; 523 | if (response.promptFeedback?.blockReason) { 524 | message += ` due to ${response.promptFeedback.blockReason}`; 525 | } 526 | if (response.promptFeedback?.blockReasonMessage) { 527 | message += `: ${response.promptFeedback.blockReasonMessage}`; 528 | } 529 | } else if (response.candidates?.[0]) { 530 | const firstCandidate = response.candidates[0]; 531 | if (hadBadFinishReason(firstCandidate)) { 532 | message += `Candidate was blocked due to ${firstCandidate.finishReason}`; 533 | if (firstCandidate.finishMessage) { 534 | message += `: ${firstCandidate.finishMessage}`; 535 | } 536 | } 537 | } 538 | return message; 539 | } 540 | 541 | // src/openai/chat/completions/NonStreamingChatProxyHandler.ts 542 | async function nonStreamingChatProxyHandler(req, apiParam, log) { 543 | const [model, geminiReq] = genModel(req); 544 | let geminiResp = ""; 545 | try { 546 | for await (const it of streamGenerateContent(apiParam, model, geminiReq)) { 547 | const data = resultHelper(it); 548 | if (typeof data === "string") { 549 | geminiResp += data; 550 | } else { 551 | geminiResp = data; 552 | break; 553 | } 554 | } 555 | } catch (err) { 556 | log?.error(req); 557 | log?.error(err?.message ?? err.toString()); 558 | geminiResp = err?.message ?? err.toString(); 559 | } 560 | log?.debug(req); 561 | log?.debug(geminiResp); 562 | function genOpenAiResp(content) { 563 | if (typeof content === "string") { 564 | return { 565 | id: "chatcmpl-abc123", 566 | object: "chat.completion", 567 | created: Math.floor(Date.now() / 1e3), 568 | model: model.model, 569 | choices: [ 570 | { 571 | message: { role: "assistant", content, refusal: null }, 572 | finish_reason: "stop", 573 | index: 0, 574 | logprobs: null 575 | } 576 | ] 577 | }; 578 | } 579 | return { 580 | id: "chatcmpl-abc123", 581 | object: "chat.completion", 582 | created: Math.floor(Date.now() / 1e3), 583 | model: model.model, 584 | choices: [ 585 | { 586 | message: { 587 | role: "assistant", 588 | refusal: null, 589 | content: null, 590 | function_call: { 591 | name: content.name ?? 
"", 592 | arguments: JSON.stringify(content.args) 593 | } 594 | }, 595 | finish_reason: "function_call", 596 | index: 0, 597 | logprobs: null 598 | } 599 | ] 600 | }; 601 | } 602 | return Response.json(genOpenAiResp(geminiResp)); 603 | } 604 | 605 | // src/openai/chat/completions/StreamingChatProxyHandler.ts 606 | function streamingChatProxyHandler(req, apiParam, log) { 607 | const [model, geminiReq] = genModel(req); 608 | log?.debug("streamGenerateContent request", req); 609 | return sseResponse( 610 | async function* () { 611 | try { 612 | for await (const it of streamGenerateContent(apiParam, model, geminiReq)) { 613 | log?.debug("streamGenerateContent resp", it); 614 | const data = resultHelper(it); 615 | yield genStreamResp({ 616 | model: model.model, 617 | content: data, 618 | stop: false 619 | }); 620 | } 621 | } catch (error) { 622 | yield genStreamResp({ 623 | model: model.model, 624 | content: error?.message ?? error.toString(), 625 | stop: true 626 | }); 627 | } 628 | yield genStreamResp({ model: model.model, content: "", stop: true }); 629 | yield "[DONE]"; 630 | return void 0; 631 | }() 632 | ); 633 | } 634 | function genStreamResp({ 635 | model, 636 | content, 637 | stop 638 | }) { 639 | if (typeof content === "string") { 640 | return { 641 | id: "chatcmpl-abc123", 642 | object: "chat.completion.chunk", 643 | created: Math.floor(Date.now() / 1e3), 644 | model, 645 | choices: [ 646 | { 647 | delta: { role: "assistant", content }, 648 | finish_reason: stop ? "stop" : null, 649 | index: 0 650 | } 651 | ] 652 | }; 653 | } 654 | return { 655 | id: "chatcmpl-abc123", 656 | object: "chat.completion.chunk", 657 | created: Math.floor(Date.now() / 1e3), 658 | model, 659 | choices: [ 660 | { 661 | delta: { role: "assistant", function_call: content }, 662 | finish_reason: stop ? "function_call" : null, 663 | index: 0 664 | } 665 | ] 666 | }; 667 | } 668 | var encoder = new TextEncoder(); 669 | function sseResponse(dataStream) { 670 | const s = new ReadableStream({ 671 | async pull(controller) { 672 | const { value, done } = await dataStream.next(); 673 | if (done) { 674 | controller.close(); 675 | } else { 676 | const data = typeof value === "string" ? value : JSON.stringify(value); 677 | controller.enqueue(encoder.encode(toSseMsg({ data }))); 678 | } 679 | } 680 | }); 681 | const response = new Response(s, { 682 | status: 200, 683 | headers: new Headers({ 684 | "Content-Type": "text/event-stream" 685 | }) 686 | }); 687 | return response; 688 | } 689 | function toSseMsg({ event, data, id }) { 690 | let result = `data: ${data} 691 | `; 692 | if (event) { 693 | result += `event: ${event ?? ""} 694 | `; 695 | } 696 | if (id) { 697 | result += `id: ${id ?? 
""} 698 | `; 699 | } 700 | return `${result} 701 | `; 702 | } 703 | 704 | // src/openai/chat/completions/ChatProxyHandler.ts 705 | async function chatProxyHandler(rawReq) { 706 | const req = await rawReq.json(); 707 | const headers = rawReq.headers; 708 | const apiParam = getToken(headers); 709 | if (apiParam == null) { 710 | return new Response("Unauthorized", { status: 401 }); 711 | } 712 | if (req.stream !== true) { 713 | return await nonStreamingChatProxyHandler(req, apiParam, rawReq.logger); 714 | } 715 | return streamingChatProxyHandler(req, apiParam, rawReq.logger); 716 | } 717 | 718 | // src/openai/embeddingProxyHandler.ts 719 | async function embeddingProxyHandler(rawReq) { 720 | const req = await rawReq.json(); 721 | const log = rawReq.logger; 722 | const headers = rawReq.headers; 723 | const apiParam = getToken(headers); 724 | if (apiParam == null) { 725 | return new Response("Unauthorized", { status: 401 }); 726 | } 727 | const embedContentRequest = { 728 | model: "models/text-embedding-004", 729 | content: { 730 | parts: [req.input].flat().map((it) => ({ text: it.toString() })) 731 | } 732 | }; 733 | log?.warn("request", embedContentRequest); 734 | let geminiResp = []; 735 | try { 736 | const it = await embedContent(apiParam, new GeminiModel("text-embedding-004"), embedContentRequest); 737 | const data = it?.embedding?.values; 738 | geminiResp = data; 739 | } catch (err) { 740 | log?.error(req); 741 | log?.error(err?.message ?? err.toString()); 742 | geminiResp = err?.message ?? err.toString(); 743 | } 744 | log?.debug(req); 745 | log?.debug(geminiResp); 746 | const resp = { 747 | object: "list", 748 | data: [ 749 | { 750 | object: "embedding", 751 | index: 0, 752 | embedding: geminiResp ?? [] 753 | } 754 | ], 755 | model: req.model, 756 | usage: { 757 | prompt_tokens: 5, 758 | total_tokens: 5 759 | } 760 | }; 761 | return Response.json(resp); 762 | } 763 | 764 | // src/openai/models.ts 765 | var modelData = Object.keys(ModelMapping).map((model) => ({ 766 | created: 1677610602, 767 | object: "model", 768 | owned_by: "openai", 769 | id: model 770 | })); 771 | var models = () => { 772 | return { 773 | object: "list", 774 | data: modelData 775 | }; 776 | }; 777 | var modelDetail = (model) => { 778 | return modelData.find((it) => it.id === model); 779 | }; 780 | 781 | // src/app.ts 782 | var { preflight, corsify } = e({ allowHeaders: "*" }); 783 | var app = r({ 784 | before: [ 785 | preflight, 786 | (req) => { 787 | req.logger = new Logger(crypto.randomUUID().toString()); 788 | req.logger.warn(`--> ${req.method} ${req.url}`); 789 | } 790 | ], 791 | finally: [ 792 | corsify, 793 | (_, req) => { 794 | req.logger?.warn(`<-- ${req.method} ${req.url}`); 795 | } 796 | ] 797 | }); 798 | app.get("/", hello); 799 | app.post("/v1/chat/completions", chatProxyHandler); 800 | app.post("/v1/embeddings", embeddingProxyHandler); 801 | app.get("/v1/models", () => Response.json(models())); 802 | app.get("/v1/models/:model", (c) => Response.json(modelDetail(c.params.model))); 803 | app.post("/:model_version/models/:model_and_action", geminiProxy); 804 | app.all("*", () => new Response("Page Not Found", { status: 404 })); 805 | 806 | // main_bun.ts 807 | console.log("Listening on http://localhost:8000/"); 808 | Bun.serve({ 809 | port: 8e3, 810 | fetch: app.fetch 811 | }); 812 | -------------------------------------------------------------------------------- /dist/main_cloudflare-workers.mjs: -------------------------------------------------------------------------------- 1 | // 
node_modules/.deno/itty-router@5.0.18/node_modules/itty-router/Router.mjs 2 | var r = ({ base: r2 = "", routes: e2 = [], ...a } = {}) => ({ __proto__: new Proxy({}, { get: (a2, t, o, c) => (a3, ...l) => e2.push([t.toUpperCase?.(), RegExp(`^${(c = (r2 + a3).replace(/\/+(\/|$)/g, "$1")).replace(/(\/?\.?):(\w+)\+/g, "($1(?<$2>*))").replace(/(\/?\.?):(\w+)/g, "($1(?<$2>[^$1/]+?))").replace(/\./g, "\\.").replace(/(\/?)\*/g, "($1.*)?")}/*$`), l, c]) && o }), routes: e2, ...a, async fetch(r3, ...t) { 3 | let o, c, l = new URL(r3.url), p = r3.query = { __proto__: null }; 4 | for (let [r4, e3] of l.searchParams) p[r4] = p[r4] ? [].concat(p[r4], e3) : e3; 5 | r: try { 6 | for (let e3 of a.before || []) if (null != (o = await e3(r3.proxy ?? r3, ...t))) break r; 7 | e: for (let [a2, p2, f, h] of e2) if ((a2 == r3.method || "ALL" == a2) && (c = l.pathname.match(p2))) { 8 | r3.params = c.groups || {}, r3.route = h; 9 | for (let e3 of f) if (null != (o = await e3(r3.proxy ?? r3, ...t))) break e; 10 | } 11 | } catch (e3) { 12 | if (!a.catch) throw e3; 13 | o = await a.catch(e3, r3.proxy ?? r3, ...t); 14 | } 15 | try { 16 | for (let e3 of a.finally || []) o = await e3(o, r3.proxy ?? r3, ...t) ?? o; 17 | } catch (e3) { 18 | if (!a.catch) throw e3; 19 | o = await a.catch(e3, r3.proxy ?? r3, ...t); 20 | } 21 | return o; 22 | } }); 23 | 24 | // node_modules/.deno/itty-router@5.0.18/node_modules/itty-router/cors.mjs 25 | var e = (e2 = {}) => { 26 | const { origin: o = "*", credentials: s = false, allowMethods: c = "*", allowHeaders: r2, exposeHeaders: n, maxAge: t } = e2, a = (e3) => { 27 | const c2 = e3?.headers.get("origin"); 28 | return true === o ? c2 : o instanceof RegExp ? o.test(c2) ? c2 : void 0 : Array.isArray(o) ? o.includes(c2) ? c2 : void 0 : o instanceof Function ? o(c2) : "*" == o && s ? c2 : o; 29 | }, l = (e3, o2) => { 30 | for (const [s2, c2] of Object.entries(o2)) c2 && e3.headers.append(s2, c2); 31 | return e3; 32 | }; 33 | return { corsify: (e3, o2) => e3?.headers?.get("access-control-allow-origin") || 101 == e3.status ? e3 : l(e3.clone(), { "access-control-allow-origin": a(o2), "access-control-allow-credentials": s }), preflight: (e3) => { 34 | if ("OPTIONS" == e3.method) { 35 | const o2 = new Response(null, { status: 204 }); 36 | return l(o2, { "access-control-allow-origin": a(e3), "access-control-allow-methods": c?.join?.(",") ?? c, "access-control-expose-headers": n?.join?.(",") ?? n, "access-control-allow-headers": r2?.join?.(",") ?? r2 ?? 
e3.headers.get("access-control-request-headers"), "access-control-max-age": t, "access-control-allow-credentials": s }); 37 | } 38 | } }; 39 | }; 40 | 41 | // src/gemini-proxy.ts 42 | async function geminiProxy(rawReq) { 43 | const url = new URL(rawReq.url); 44 | url.host = "generativelanguage.googleapis.com"; 45 | url.port = ""; 46 | url.protocol = "https:"; 47 | const req = new Request(url, rawReq); 48 | const resp = await fetch(req); 49 | return new Response(resp.body, resp); 50 | } 51 | 52 | // src/utils.ts 53 | function getToken(headers) { 54 | for (const [k, v] of headers) { 55 | if (k.toLowerCase() !== "authorization") continue; 56 | const rawApikey = v.substring(v.indexOf(" ") + 1); 57 | if (!rawApikey.includes("#")) { 58 | return { 59 | apikey: rawApikey, 60 | useBeta: false 61 | }; 62 | } 63 | const apikey = rawApikey.substring(0, rawApikey.indexOf("#")); 64 | const params = new URLSearchParams(rawApikey.substring(rawApikey.indexOf("#") + 1)); 65 | return { 66 | apikey, 67 | useBeta: params.has("useBeta") 68 | }; 69 | } 70 | return null; 71 | } 72 | function parseBase64(base64) { 73 | if (!base64.startsWith("data:")) { 74 | return { text: "" }; 75 | } 76 | const [m, data, ..._arr] = base64.split(","); 77 | const mimeType = m.match(/:(?.*?);/)?.groups?.mime ?? "img/png"; 78 | return { 79 | inlineData: { 80 | mimeType, 81 | data 82 | } 83 | }; 84 | } 85 | function openAiMessageToGeminiMessage(messages) { 86 | const result = messages.flatMap(({ role, content }) => { 87 | if (role === "system") { 88 | return [ 89 | { 90 | role: "user", 91 | parts: typeof content !== "string" ? content : [{ text: content }] 92 | } 93 | ]; 94 | } 95 | const parts = content == null || typeof content === "string" ? [{ text: content?.toString() ?? "" }] : content.map((item) => { 96 | if (item.type === "text") return { text: item.text }; 97 | if (item.type === "image_url") return parseBase64(item.image_url.url); 98 | return { text: "OK" }; 99 | }); 100 | return [{ role: "user" === role ? "user" : "model", parts }]; 101 | }); 102 | return result; 103 | } 104 | function genModel(req) { 105 | const model = GeminiModel.modelMapping(req.model); 106 | let functions = req.tools?.filter((it) => it.type === "function")?.map((it) => it.function) ?? []; 107 | functions = functions.concat((req.functions ?? []).map((it) => ({ strict: null, ...it }))); 108 | const [responseMimeType, responseSchema] = (() => { 109 | switch (req.response_format?.type) { 110 | case "json_object": 111 | return ["application/json", void 0]; 112 | case "json_schema": 113 | return ["application/json", req.response_format.json_schema.schema]; 114 | case "text": 115 | return ["text/plain", void 0]; 116 | default: 117 | return [void 0, void 0]; 118 | } 119 | })(); 120 | const generateContentRequest = { 121 | contents: openAiMessageToGeminiMessage(req.messages), 122 | generationConfig: { 123 | maxOutputTokens: req.max_completion_tokens ?? void 0, 124 | temperature: req.temperature ?? void 0, 125 | topP: req.top_p ?? void 0, 126 | responseMimeType, 127 | responseSchema, 128 | thinkingConfig: !model.isThinkingModel() ? void 0 : { 129 | includeThoughts: true 130 | } 131 | }, 132 | tools: functions.length === 0 ? 
void 0 : [ 133 | { 134 | functionDeclarations: functions 135 | } 136 | ], 137 | safetySettings: [ 138 | "HARM_CATEGORY_HATE_SPEECH", 139 | "HARM_CATEGORY_SEXUALLY_EXPLICIT", 140 | "HARM_CATEGORY_DANGEROUS_CONTENT", 141 | "HARM_CATEGORY_HARASSMENT" 142 | ].map((category) => ({ 143 | category, 144 | threshold: "BLOCK_NONE" 145 | })) 146 | }; 147 | return [model, generateContentRequest]; 148 | } 149 | var GeminiModel = class _GeminiModel { 150 | static modelMapping(model) { 151 | const modelName = ModelMapping[model ?? ""] ?? _GeminiModel.defaultModel(model ?? ""); 152 | return new _GeminiModel(modelName); 153 | } 154 | model; 155 | constructor(model) { 156 | this.model = model; 157 | } 158 | isThinkingModel() { 159 | return this.model.includes("thinking"); 160 | } 161 | apiVersion() { 162 | if (this.isThinkingModel()) { 163 | return "v1alpha"; 164 | } 165 | return "v1beta"; 166 | } 167 | toString() { 168 | return this.model; 169 | } 170 | static defaultModel(m) { 171 | if (m.startsWith("gemini")) { 172 | return m; 173 | } 174 | return "gemini-1.5-flash-latest"; 175 | } 176 | }; 177 | var ModelMapping = { 178 | "gpt-3.5-turbo": "gemini-1.5-flash-8b-latest", 179 | "gpt-4": "gemini-1.5-pro-latest", 180 | "gpt-4o": "gemini-1.5-flash-latest", 181 | "gpt-4o-mini": "gemini-1.5-flash-8b-latest", 182 | "gpt-4-vision-preview": "gemini-1.5-flash-latest", 183 | "gpt-4-turbo": "gemini-1.5-pro-latest", 184 | "gpt-4-turbo-preview": "gemini-2.0-flash-exp" 185 | }; 186 | function getRuntimeKey() { 187 | const global = globalThis; 188 | if (global?.Deno !== void 0) { 189 | return "deno"; 190 | } 191 | if (global?.Bun !== void 0) { 192 | return "bun"; 193 | } 194 | if (typeof global?.WebSocketPair === "function") { 195 | return "workerd"; 196 | } 197 | if (typeof global?.EdgeRuntime === "string") { 198 | return "edge-light"; 199 | } 200 | if (global?.fastly !== void 0) { 201 | return "fastly"; 202 | } 203 | if (global?.process?.release?.name === "node") { 204 | return "node"; 205 | } 206 | return "other"; 207 | } 208 | 209 | // src/hello.ts 210 | function hello(req) { 211 | const origin = new URL(req.url).origin; 212 | return new Response(` 213 | Hello Gemini-OpenAI-Proxy from ${getRuntimeKey()}! 214 | 215 | You can try it with: 216 | 217 | curl ${origin}/v1/chat/completions \\ 218 | -H "Authorization: Bearer $YOUR_GEMINI_API_KEY" \\ 219 | -H "Content-Type: application/json" \\ 220 | -d '{ 221 | "model": "gpt-3.5-turbo", 222 | "messages": [{"role": "user", "content": "Hello"}], 223 | "temperature": 0.7 224 | }' 225 | `); 226 | } 227 | 228 | // src/log.ts 229 | var LEVEL = ["debug", "info", "warn", "error"]; 230 | var Logger = class { 231 | config; 232 | debug; 233 | info; 234 | warn; 235 | error; 236 | constructor(prefix, logLevel) { 237 | const level = LEVEL.find((it) => it === logLevel) ?? "warn"; 238 | this.config = { 239 | prefix: prefix ?? "", 240 | level 241 | }; 242 | for (const m of LEVEL) { 243 | this[m] = (...data) => this.#write(m, ...data); 244 | } 245 | } 246 | #write(level, ...data) { 247 | const { level: configLevel, prefix } = this.config; 248 | if (LEVEL.indexOf(level) < LEVEL.indexOf(configLevel)) { 249 | return; 250 | } 251 | console[level](`${(/* @__PURE__ */ new Date()).toISOString()} ${level.toUpperCase()}${prefix ? 
` ${prefix}` : ""}`, ...data); 252 | } 253 | }; 254 | 255 | // node_modules/.deno/eventsource-parser@3.0.1/node_modules/eventsource-parser/dist/index.js 256 | var ParseError = class extends Error { 257 | constructor(message, options) { 258 | super(message), this.name = "ParseError", this.type = options.type, this.field = options.field, this.value = options.value, this.line = options.line; 259 | } 260 | }; 261 | function noop(_arg) { 262 | } 263 | function createParser(callbacks) { 264 | if (typeof callbacks == "function") 265 | throw new TypeError( 266 | "`callbacks` must be an object, got a function instead. Did you mean `{onEvent: fn}`?" 267 | ); 268 | const { onEvent = noop, onError = noop, onRetry = noop, onComment } = callbacks; 269 | let incompleteLine = "", isFirstChunk = true, id, data = "", eventType = ""; 270 | function feed(newChunk) { 271 | const chunk = isFirstChunk ? newChunk.replace(/^\xEF\xBB\xBF/, "") : newChunk, [complete, incomplete] = splitLines(`${incompleteLine}${chunk}`); 272 | for (const line of complete) 273 | parseLine(line); 274 | incompleteLine = incomplete, isFirstChunk = false; 275 | } 276 | function parseLine(line) { 277 | if (line === "") { 278 | dispatchEvent(); 279 | return; 280 | } 281 | if (line.startsWith(":")) { 282 | onComment && onComment(line.slice(line.startsWith(": ") ? 2 : 1)); 283 | return; 284 | } 285 | const fieldSeparatorIndex = line.indexOf(":"); 286 | if (fieldSeparatorIndex !== -1) { 287 | const field = line.slice(0, fieldSeparatorIndex), offset = line[fieldSeparatorIndex + 1] === " " ? 2 : 1, value = line.slice(fieldSeparatorIndex + offset); 288 | processField(field, value, line); 289 | return; 290 | } 291 | processField(line, "", line); 292 | } 293 | function processField(field, value, line) { 294 | switch (field) { 295 | case "event": 296 | eventType = value; 297 | break; 298 | case "data": 299 | data = `${data}${value} 300 | `; 301 | break; 302 | case "id": 303 | id = value.includes("\0") ? void 0 : value; 304 | break; 305 | case "retry": 306 | /^\d+$/.test(value) ? onRetry(parseInt(value, 10)) : onError( 307 | new ParseError(`Invalid \`retry\` value: "${value}"`, { 308 | type: "invalid-retry", 309 | value, 310 | line 311 | }) 312 | ); 313 | break; 314 | default: 315 | onError( 316 | new ParseError( 317 | `Unknown field "${field.length > 20 ? `${field.slice(0, 20)}\u2026` : field}"`, 318 | { type: "unknown-field", field, value, line } 319 | ) 320 | ); 321 | break; 322 | } 323 | } 324 | function dispatchEvent() { 325 | data.length > 0 && onEvent({ 326 | id, 327 | event: eventType || void 0, 328 | // If the data buffer's last character is a U+000A LINE FEED (LF) character, 329 | // then remove the last character from the data buffer. 330 | data: data.endsWith(` 331 | `) ? data.slice(0, -1) : data 332 | }), id = void 0, data = "", eventType = ""; 333 | } 334 | function reset(options = {}) { 335 | incompleteLine && options.consume && parseLine(incompleteLine), isFirstChunk = true, id = void 0, data = "", eventType = "", incompleteLine = ""; 336 | } 337 | return { feed, reset }; 338 | } 339 | function splitLines(chunk) { 340 | const lines = []; 341 | let incompleteLine = "", searchIndex = 0; 342 | for (; searchIndex < chunk.length; ) { 343 | const crIndex = chunk.indexOf("\r", searchIndex), lfIndex = chunk.indexOf(` 344 | `, searchIndex); 345 | let lineEnd = -1; 346 | if (crIndex !== -1 && lfIndex !== -1 ? lineEnd = Math.min(crIndex, lfIndex) : crIndex !== -1 ? 
lineEnd = crIndex : lfIndex !== -1 && (lineEnd = lfIndex), lineEnd === -1) { 347 | incompleteLine = chunk.slice(searchIndex); 348 | break; 349 | } else { 350 | const line = chunk.slice(searchIndex, lineEnd); 351 | lines.push(line), searchIndex = lineEnd + 1, chunk[searchIndex - 1] === "\r" && chunk[searchIndex] === ` 352 | ` && searchIndex++; 353 | } 354 | } 355 | return [lines, incompleteLine]; 356 | } 357 | 358 | // node_modules/.deno/eventsource-parser@3.0.1/node_modules/eventsource-parser/dist/stream.js 359 | var EventSourceParserStream = class extends TransformStream { 360 | constructor({ onError, onRetry, onComment } = {}) { 361 | let parser; 362 | super({ 363 | start(controller) { 364 | parser = createParser({ 365 | onEvent: (event) => { 366 | controller.enqueue(event); 367 | }, 368 | onError(error) { 369 | onError === "terminate" ? controller.error(error) : typeof onError == "function" && onError(error); 370 | }, 371 | onRetry, 372 | onComment 373 | }); 374 | }, 375 | transform(chunk) { 376 | parser.feed(chunk); 377 | } 378 | }); 379 | } 380 | }; 381 | 382 | // src/gemini-api-client/errors.ts 383 | var GoogleGenerativeAIError = class extends Error { 384 | constructor(message) { 385 | super(`[GoogleGenerativeAI Error]: ${message}`); 386 | } 387 | }; 388 | var GoogleGenerativeAIResponseError = class extends GoogleGenerativeAIError { 389 | response; 390 | constructor(message, response) { 391 | super(message); 392 | this.response = response; 393 | } 394 | }; 395 | 396 | // src/gemini-api-client/gemini-api-client.ts 397 | async function* streamGenerateContent(apiParam, model, params, requestOptions) { 398 | const response = await makeRequest( 399 | toURL({ model, task: "streamGenerateContent", stream: true, apiParam }), 400 | JSON.stringify(params), 401 | requestOptions 402 | ); 403 | const body = response.body; 404 | if (body == null) { 405 | return; 406 | } 407 | for await (const event of body.pipeThrough(new TextDecoderStream()).pipeThrough(new EventSourceParserStream())) { 408 | const responseJson = JSON.parse(event.data); 409 | yield responseJson; 410 | } 411 | } 412 | async function embedContent(apiParam, model, params, requestOptions) { 413 | const response = await makeRequest( 414 | toURL({ model, task: "embedContent", stream: false, apiParam }), 415 | JSON.stringify(params), 416 | requestOptions 417 | ); 418 | const body = response.body; 419 | if (body == null) { 420 | return; 421 | } 422 | const responseJson = await response.json(); 423 | return responseJson; 424 | } 425 | async function makeRequest(url, body, requestOptions) { 426 | let response; 427 | try { 428 | response = await fetch(url, { 429 | ...buildFetchOptions(requestOptions), 430 | method: "POST", 431 | headers: { 432 | "Content-Type": "application/json" 433 | }, 434 | body 435 | }); 436 | if (!response.ok) { 437 | let message = ""; 438 | try { 439 | const errResp = await response.json(); 440 | message = errResp.error?.message; 441 | if (errResp?.error?.details) { 442 | message += ` ${JSON.stringify(errResp.error.details)}`; 443 | } 444 | } catch (_e) { 445 | } 446 | throw new Error(`[${response.status} ${response.statusText}] ${message}`); 447 | } 448 | } catch (e2) { 449 | console.log(e2); 450 | const err = new GoogleGenerativeAIError(`Error fetching from google -> ${e2.message}`); 451 | err.stack = e2.stack; 452 | throw err; 453 | } 454 | return response; 455 | } 456 | function toURL({ 457 | model, 458 | task, 459 | stream, 460 | apiParam 461 | }) { 462 | const BASE_URL = 
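// The async generator above is this streaming pipeline in miniature (sketch;
// `sseUrl` is a hypothetical SSE endpoint):
//   const resp = await fetch(sseUrl);
//   const events = resp.body
//     .pipeThrough(new TextDecoderStream())        // bytes -> strings
//     .pipeThrough(new EventSourceParserStream()); // strings -> parsed SSE events
//   for await (const ev of events) console.log(JSON.parse(ev.data));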
"https://generativelanguage.googleapis.com"; 463 | const api_version = model.apiVersion(); 464 | const url = new URL(`${BASE_URL}/${api_version}/models/${model}:${task}`); 465 | url.searchParams.append("key", apiParam.apikey); 466 | if (stream) { 467 | url.searchParams.append("alt", "sse"); 468 | } 469 | return url; 470 | } 471 | function buildFetchOptions(requestOptions) { 472 | const fetchOptions = {}; 473 | if (requestOptions?.timeout) { 474 | const abortController = new AbortController(); 475 | const signal = abortController.signal; 476 | setTimeout(() => abortController.abort(), requestOptions.timeout); 477 | fetchOptions.signal = signal; 478 | } 479 | return fetchOptions; 480 | } 481 | 482 | // src/gemini-api-client/response-helper.ts 483 | function resultHelper(response) { 484 | if (response.candidates && response.candidates.length > 0) { 485 | if (response.candidates.length > 1) { 486 | console.warn( 487 | `This response had ${response.candidates.length} candidates. Returning text from the first candidate only. Access response.candidates directly to use the other candidates.` 488 | ); 489 | } 490 | if (hadBadFinishReason(response.candidates[0])) { 491 | throw new GoogleGenerativeAIResponseError( 492 | `${formatBlockErrorMessage(response)}`, 493 | response 494 | ); 495 | } 496 | return getText(response); 497 | } 498 | if (response.promptFeedback) { 499 | throw new GoogleGenerativeAIResponseError( 500 | `Text not available. ${formatBlockErrorMessage(response)}`, 501 | response 502 | ); 503 | } 504 | return ""; 505 | } 506 | function getText(response) { 507 | if (response.candidates?.[0].content?.parts?.[0]?.text) { 508 | return response.candidates[0].content.parts[0].text; 509 | } 510 | if (response.candidates?.[0].content?.parts?.[0]?.functionCall) { 511 | return response.candidates[0].content.parts[0].functionCall; 512 | } 513 | return ""; 514 | } 515 | var badFinishReasons = ["RECITATION", "SAFETY"]; 516 | function hadBadFinishReason(candidate) { 517 | return !!candidate.finishReason && badFinishReasons.includes(candidate.finishReason); 518 | } 519 | function formatBlockErrorMessage(response) { 520 | let message = ""; 521 | if ((!response.candidates || response.candidates.length === 0) && response.promptFeedback) { 522 | message += "Response was blocked"; 523 | if (response.promptFeedback?.blockReason) { 524 | message += ` due to ${response.promptFeedback.blockReason}`; 525 | } 526 | if (response.promptFeedback?.blockReasonMessage) { 527 | message += `: ${response.promptFeedback.blockReasonMessage}`; 528 | } 529 | } else if (response.candidates?.[0]) { 530 | const firstCandidate = response.candidates[0]; 531 | if (hadBadFinishReason(firstCandidate)) { 532 | message += `Candidate was blocked due to ${firstCandidate.finishReason}`; 533 | if (firstCandidate.finishMessage) { 534 | message += `: ${firstCandidate.finishMessage}`; 535 | } 536 | } 537 | } 538 | return message; 539 | } 540 | 541 | // src/openai/chat/completions/NonStreamingChatProxyHandler.ts 542 | async function nonStreamingChatProxyHandler(req, apiParam, log) { 543 | const [model, geminiReq] = genModel(req); 544 | let geminiResp = ""; 545 | try { 546 | for await (const it of streamGenerateContent(apiParam, model, geminiReq)) { 547 | const data = resultHelper(it); 548 | if (typeof data === "string") { 549 | geminiResp += data; 550 | } else { 551 | geminiResp = data; 552 | break; 553 | } 554 | } 555 | } catch (err) { 556 | log?.error(req); 557 | log?.error(err?.message ?? err.toString()); 558 | geminiResp = err?.message ?? 
err.toString(); 559 | } 560 | log?.debug(req); 561 | log?.debug(geminiResp); 562 | function genOpenAiResp(content) { 563 | if (typeof content === "string") { 564 | return { 565 | id: "chatcmpl-abc123", 566 | object: "chat.completion", 567 | created: Math.floor(Date.now() / 1e3), 568 | model: model.model, 569 | choices: [ 570 | { 571 | message: { role: "assistant", content, refusal: null }, 572 | finish_reason: "stop", 573 | index: 0, 574 | logprobs: null 575 | } 576 | ] 577 | }; 578 | } 579 | return { 580 | id: "chatcmpl-abc123", 581 | object: "chat.completion", 582 | created: Math.floor(Date.now() / 1e3), 583 | model: model.model, 584 | choices: [ 585 | { 586 | message: { 587 | role: "assistant", 588 | refusal: null, 589 | content: null, 590 | function_call: { 591 | name: content.name ?? "", 592 | arguments: JSON.stringify(content.args) 593 | } 594 | }, 595 | finish_reason: "function_call", 596 | index: 0, 597 | logprobs: null 598 | } 599 | ] 600 | }; 601 | } 602 | return Response.json(genOpenAiResp(geminiResp)); 603 | } 604 | 605 | // src/openai/chat/completions/StreamingChatProxyHandler.ts 606 | function streamingChatProxyHandler(req, apiParam, log) { 607 | const [model, geminiReq] = genModel(req); 608 | log?.debug("streamGenerateContent request", req); 609 | return sseResponse( 610 | async function* () { 611 | try { 612 | for await (const it of streamGenerateContent(apiParam, model, geminiReq)) { 613 | log?.debug("streamGenerateContent resp", it); 614 | const data = resultHelper(it); 615 | yield genStreamResp({ 616 | model: model.model, 617 | content: data, 618 | stop: false 619 | }); 620 | } 621 | } catch (error) { 622 | yield genStreamResp({ 623 | model: model.model, 624 | content: error?.message ?? error.toString(), 625 | stop: true 626 | }); 627 | } 628 | yield genStreamResp({ model: model.model, content: "", stop: true }); 629 | yield "[DONE]"; 630 | return void 0; 631 | }() 632 | ); 633 | } 634 | function genStreamResp({ 635 | model, 636 | content, 637 | stop 638 | }) { 639 | if (typeof content === "string") { 640 | return { 641 | id: "chatcmpl-abc123", 642 | object: "chat.completion.chunk", 643 | created: Math.floor(Date.now() / 1e3), 644 | model, 645 | choices: [ 646 | { 647 | delta: { role: "assistant", content }, 648 | finish_reason: stop ? "stop" : null, 649 | index: 0 650 | } 651 | ] 652 | }; 653 | } 654 | return { 655 | id: "chatcmpl-abc123", 656 | object: "chat.completion.chunk", 657 | created: Math.floor(Date.now() / 1e3), 658 | model, 659 | choices: [ 660 | { 661 | delta: { role: "assistant", function_call: content }, 662 | finish_reason: stop ? "function_call" : null, 663 | index: 0 664 | } 665 | ] 666 | }; 667 | } 668 | var encoder = new TextEncoder(); 669 | function sseResponse(dataStream) { 670 | const s = new ReadableStream({ 671 | async pull(controller) { 672 | const { value, done } = await dataStream.next(); 673 | if (done) { 674 | controller.close(); 675 | } else { 676 | const data = typeof value === "string" ? value : JSON.stringify(value); 677 | controller.enqueue(encoder.encode(toSseMsg({ data }))); 678 | } 679 | } 680 | }); 681 | const response = new Response(s, { 682 | status: 200, 683 | headers: new Headers({ 684 | "Content-Type": "text/event-stream" 685 | }) 686 | }); 687 | return response; 688 | } 689 | function toSseMsg({ event, data, id }) { 690 | let result = `data: ${data} 691 | `; 692 | if (event) { 693 | result += `event: ${event ?? ""} 694 | `; 695 | } 696 | if (id) { 697 | result += `id: ${id ?? 
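// On the wire, sseResponse()/toSseMsg() above emit OpenAI-style chunks, e.g.
// (illustrative payload, truncated with "..."):
//   data: {"id":"chatcmpl-abc123","object":"chat.completion.chunk",...,"choices":[{"delta":{"role":"assistant","content":"Hi"},"finish_reason":null,"index":0}]}
//   (a blank line terminates each SSE message)
//   data: [DONE]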
""} 698 | `; 699 | } 700 | return `${result} 701 | `; 702 | } 703 | 704 | // src/openai/chat/completions/ChatProxyHandler.ts 705 | async function chatProxyHandler(rawReq) { 706 | const req = await rawReq.json(); 707 | const headers = rawReq.headers; 708 | const apiParam = getToken(headers); 709 | if (apiParam == null) { 710 | return new Response("Unauthorized", { status: 401 }); 711 | } 712 | if (req.stream !== true) { 713 | return await nonStreamingChatProxyHandler(req, apiParam, rawReq.logger); 714 | } 715 | return streamingChatProxyHandler(req, apiParam, rawReq.logger); 716 | } 717 | 718 | // src/openai/embeddingProxyHandler.ts 719 | async function embeddingProxyHandler(rawReq) { 720 | const req = await rawReq.json(); 721 | const log = rawReq.logger; 722 | const headers = rawReq.headers; 723 | const apiParam = getToken(headers); 724 | if (apiParam == null) { 725 | return new Response("Unauthorized", { status: 401 }); 726 | } 727 | const embedContentRequest = { 728 | model: "models/text-embedding-004", 729 | content: { 730 | parts: [req.input].flat().map((it) => ({ text: it.toString() })) 731 | } 732 | }; 733 | log?.warn("request", embedContentRequest); 734 | let geminiResp = []; 735 | try { 736 | const it = await embedContent(apiParam, new GeminiModel("text-embedding-004"), embedContentRequest); 737 | const data = it?.embedding?.values; 738 | geminiResp = data; 739 | } catch (err) { 740 | log?.error(req); 741 | log?.error(err?.message ?? err.toString()); 742 | geminiResp = err?.message ?? err.toString(); 743 | } 744 | log?.debug(req); 745 | log?.debug(geminiResp); 746 | const resp = { 747 | object: "list", 748 | data: [ 749 | { 750 | object: "embedding", 751 | index: 0, 752 | embedding: geminiResp ?? [] 753 | } 754 | ], 755 | model: req.model, 756 | usage: { 757 | prompt_tokens: 5, 758 | total_tokens: 5 759 | } 760 | }; 761 | return Response.json(resp); 762 | } 763 | 764 | // src/openai/models.ts 765 | var modelData = Object.keys(ModelMapping).map((model) => ({ 766 | created: 1677610602, 767 | object: "model", 768 | owned_by: "openai", 769 | id: model 770 | })); 771 | var models = () => { 772 | return { 773 | object: "list", 774 | data: modelData 775 | }; 776 | }; 777 | var modelDetail = (model) => { 778 | return modelData.find((it) => it.id === model); 779 | }; 780 | 781 | // src/app.ts 782 | var { preflight, corsify } = e({ allowHeaders: "*" }); 783 | var app = r({ 784 | before: [ 785 | preflight, 786 | (req) => { 787 | req.logger = new Logger(crypto.randomUUID().toString()); 788 | req.logger.warn(`--> ${req.method} ${req.url}`); 789 | } 790 | ], 791 | finally: [ 792 | corsify, 793 | (_, req) => { 794 | req.logger?.warn(`<-- ${req.method} ${req.url}`); 795 | } 796 | ] 797 | }); 798 | app.get("/", hello); 799 | app.post("/v1/chat/completions", chatProxyHandler); 800 | app.post("/v1/embeddings", embeddingProxyHandler); 801 | app.get("/v1/models", () => Response.json(models())); 802 | app.get("/v1/models/:model", (c) => Response.json(modelDetail(c.params.model))); 803 | app.post("/:model_version/models/:model_and_action", geminiProxy); 804 | app.all("*", () => new Response("Page Not Found", { status: 404 })); 805 | 806 | // main_cloudflare-workers.ts 807 | var main_cloudflare_workers_default = { 808 | fetch: app.fetch 809 | }; 810 | export { 811 | main_cloudflare_workers_default as default 812 | }; 813 | -------------------------------------------------------------------------------- /dist/main_deno.mjs: -------------------------------------------------------------------------------- 1 | 
// node_modules/.deno/itty-router@5.0.18/node_modules/itty-router/Router.mjs 2 | var r = ({ base: r2 = "", routes: e2 = [], ...a } = {}) => ({ __proto__: new Proxy({}, { get: (a2, t, o, c) => (a3, ...l) => e2.push([t.toUpperCase?.(), RegExp(`^${(c = (r2 + a3).replace(/\/+(\/|$)/g, "$1")).replace(/(\/?\.?):(\w+)\+/g, "($1(?<$2>*))").replace(/(\/?\.?):(\w+)/g, "($1(?<$2>[^$1/]+?))").replace(/\./g, "\\.").replace(/(\/?)\*/g, "($1.*)?")}/*$`), l, c]) && o }), routes: e2, ...a, async fetch(r3, ...t) { 3 | let o, c, l = new URL(r3.url), p = r3.query = { __proto__: null }; 4 | for (let [r4, e3] of l.searchParams) p[r4] = p[r4] ? [].concat(p[r4], e3) : e3; 5 | r: try { 6 | for (let e3 of a.before || []) if (null != (o = await e3(r3.proxy ?? r3, ...t))) break r; 7 | e: for (let [a2, p2, f, h] of e2) if ((a2 == r3.method || "ALL" == a2) && (c = l.pathname.match(p2))) { 8 | r3.params = c.groups || {}, r3.route = h; 9 | for (let e3 of f) if (null != (o = await e3(r3.proxy ?? r3, ...t))) break e; 10 | } 11 | } catch (e3) { 12 | if (!a.catch) throw e3; 13 | o = await a.catch(e3, r3.proxy ?? r3, ...t); 14 | } 15 | try { 16 | for (let e3 of a.finally || []) o = await e3(o, r3.proxy ?? r3, ...t) ?? o; 17 | } catch (e3) { 18 | if (!a.catch) throw e3; 19 | o = await a.catch(e3, r3.proxy ?? r3, ...t); 20 | } 21 | return o; 22 | } }); 23 | 24 | // node_modules/.deno/itty-router@5.0.18/node_modules/itty-router/cors.mjs 25 | var e = (e2 = {}) => { 26 | const { origin: o = "*", credentials: s = false, allowMethods: c = "*", allowHeaders: r2, exposeHeaders: n, maxAge: t } = e2, a = (e3) => { 27 | const c2 = e3?.headers.get("origin"); 28 | return true === o ? c2 : o instanceof RegExp ? o.test(c2) ? c2 : void 0 : Array.isArray(o) ? o.includes(c2) ? c2 : void 0 : o instanceof Function ? o(c2) : "*" == o && s ? c2 : o; 29 | }, l = (e3, o2) => { 30 | for (const [s2, c2] of Object.entries(o2)) c2 && e3.headers.append(s2, c2); 31 | return e3; 32 | }; 33 | return { corsify: (e3, o2) => e3?.headers?.get("access-control-allow-origin") || 101 == e3.status ? e3 : l(e3.clone(), { "access-control-allow-origin": a(o2), "access-control-allow-credentials": s }), preflight: (e3) => { 34 | if ("OPTIONS" == e3.method) { 35 | const o2 = new Response(null, { status: 204 }); 36 | return l(o2, { "access-control-allow-origin": a(e3), "access-control-allow-methods": c?.join?.(",") ?? c, "access-control-expose-headers": n?.join?.(",") ?? n, "access-control-allow-headers": r2?.join?.(",") ?? r2 ?? 
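// Readable sketch of the minified itty-router factory `r` above (illustrative):
// routes are collected through the Proxy and matched as RegExps at fetch time.
//   const app = r({ before: [], finally: [] });
//   app.get("/hello/:name", (req) => new Response(req.params.name));
//   await app.fetch(new Request("http://x/hello/world")); // -> Response "world"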
e3.headers.get("access-control-request-headers"), "access-control-max-age": t, "access-control-allow-credentials": s }); 37 | } 38 | } }; 39 | }; 40 | 41 | // src/gemini-proxy.ts 42 | async function geminiProxy(rawReq) { 43 | const url = new URL(rawReq.url); 44 | url.host = "generativelanguage.googleapis.com"; 45 | url.port = ""; 46 | url.protocol = "https:"; 47 | const req = new Request(url, rawReq); 48 | const resp = await fetch(req); 49 | return new Response(resp.body, resp); 50 | } 51 | 52 | // src/utils.ts 53 | function getToken(headers) { 54 | for (const [k, v] of headers) { 55 | if (k.toLowerCase() !== "authorization") continue; 56 | const rawApikey = v.substring(v.indexOf(" ") + 1); 57 | if (!rawApikey.includes("#")) { 58 | return { 59 | apikey: rawApikey, 60 | useBeta: false 61 | }; 62 | } 63 | const apikey = rawApikey.substring(0, rawApikey.indexOf("#")); 64 | const params = new URLSearchParams(rawApikey.substring(rawApikey.indexOf("#") + 1)); 65 | return { 66 | apikey, 67 | useBeta: params.has("useBeta") 68 | }; 69 | } 70 | return null; 71 | } 72 | function parseBase64(base64) { 73 | if (!base64.startsWith("data:")) { 74 | return { text: "" }; 75 | } 76 | const [m, data, ..._arr] = base64.split(","); 77 | const mimeType = m.match(/:(?.*?);/)?.groups?.mime ?? "img/png"; 78 | return { 79 | inlineData: { 80 | mimeType, 81 | data 82 | } 83 | }; 84 | } 85 | function openAiMessageToGeminiMessage(messages) { 86 | const result = messages.flatMap(({ role, content }) => { 87 | if (role === "system") { 88 | return [ 89 | { 90 | role: "user", 91 | parts: typeof content !== "string" ? content : [{ text: content }] 92 | } 93 | ]; 94 | } 95 | const parts = content == null || typeof content === "string" ? [{ text: content?.toString() ?? "" }] : content.map((item) => { 96 | if (item.type === "text") return { text: item.text }; 97 | if (item.type === "image_url") return parseBase64(item.image_url.url); 98 | return { text: "OK" }; 99 | }); 100 | return [{ role: "user" === role ? "user" : "model", parts }]; 101 | }); 102 | return result; 103 | } 104 | function genModel(req) { 105 | const model = GeminiModel.modelMapping(req.model); 106 | let functions = req.tools?.filter((it) => it.type === "function")?.map((it) => it.function) ?? []; 107 | functions = functions.concat((req.functions ?? []).map((it) => ({ strict: null, ...it }))); 108 | const [responseMimeType, responseSchema] = (() => { 109 | switch (req.response_format?.type) { 110 | case "json_object": 111 | return ["application/json", void 0]; 112 | case "json_schema": 113 | return ["application/json", req.response_format.json_schema.schema]; 114 | case "text": 115 | return ["text/plain", void 0]; 116 | default: 117 | return [void 0, void 0]; 118 | } 119 | })(); 120 | const generateContentRequest = { 121 | contents: openAiMessageToGeminiMessage(req.messages), 122 | generationConfig: { 123 | maxOutputTokens: req.max_completion_tokens ?? void 0, 124 | temperature: req.temperature ?? void 0, 125 | topP: req.top_p ?? void 0, 126 | responseMimeType, 127 | responseSchema, 128 | thinkingConfig: !model.isThinkingModel() ? void 0 : { 129 | includeThoughts: true 130 | } 131 | }, 132 | tools: functions.length === 0 ? 
void 0 : [ 133 | { 134 | functionDeclarations: functions 135 | } 136 | ], 137 | safetySettings: [ 138 | "HARM_CATEGORY_HATE_SPEECH", 139 | "HARM_CATEGORY_SEXUALLY_EXPLICIT", 140 | "HARM_CATEGORY_DANGEROUS_CONTENT", 141 | "HARM_CATEGORY_HARASSMENT" 142 | ].map((category) => ({ 143 | category, 144 | threshold: "BLOCK_NONE" 145 | })) 146 | }; 147 | return [model, generateContentRequest]; 148 | } 149 | var GeminiModel = class _GeminiModel { 150 | static modelMapping(model) { 151 | const modelName = ModelMapping[model ?? ""] ?? _GeminiModel.defaultModel(model ?? ""); 152 | return new _GeminiModel(modelName); 153 | } 154 | model; 155 | constructor(model) { 156 | this.model = model; 157 | } 158 | isThinkingModel() { 159 | return this.model.includes("thinking"); 160 | } 161 | apiVersion() { 162 | if (this.isThinkingModel()) { 163 | return "v1alpha"; 164 | } 165 | return "v1beta"; 166 | } 167 | toString() { 168 | return this.model; 169 | } 170 | static defaultModel(m) { 171 | if (m.startsWith("gemini")) { 172 | return m; 173 | } 174 | return "gemini-1.5-flash-latest"; 175 | } 176 | }; 177 | var ModelMapping = { 178 | "gpt-3.5-turbo": "gemini-1.5-flash-8b-latest", 179 | "gpt-4": "gemini-1.5-pro-latest", 180 | "gpt-4o": "gemini-1.5-flash-latest", 181 | "gpt-4o-mini": "gemini-1.5-flash-8b-latest", 182 | "gpt-4-vision-preview": "gemini-1.5-flash-latest", 183 | "gpt-4-turbo": "gemini-1.5-pro-latest", 184 | "gpt-4-turbo-preview": "gemini-2.0-flash-exp" 185 | }; 186 | function getRuntimeKey() { 187 | const global = globalThis; 188 | if (global?.Deno !== void 0) { 189 | return "deno"; 190 | } 191 | if (global?.Bun !== void 0) { 192 | return "bun"; 193 | } 194 | if (typeof global?.WebSocketPair === "function") { 195 | return "workerd"; 196 | } 197 | if (typeof global?.EdgeRuntime === "string") { 198 | return "edge-light"; 199 | } 200 | if (global?.fastly !== void 0) { 201 | return "fastly"; 202 | } 203 | if (global?.process?.release?.name === "node") { 204 | return "node"; 205 | } 206 | return "other"; 207 | } 208 | 209 | // src/hello.ts 210 | function hello(req) { 211 | const origin = new URL(req.url).origin; 212 | return new Response(` 213 | Hello Gemini-OpenAI-Proxy from ${getRuntimeKey()}! 214 | 215 | You can try it with: 216 | 217 | curl ${origin}/v1/chat/completions \\ 218 | -H "Authorization: Bearer $YOUR_GEMINI_API_KEY" \\ 219 | -H "Content-Type: application/json" \\ 220 | -d '{ 221 | "model": "gpt-3.5-turbo", 222 | "messages": [{"role": "user", "content": "Hello"}], 223 | "temperature": 0.7 224 | }' 225 | `); 226 | } 227 | 228 | // src/log.ts 229 | var LEVEL = ["debug", "info", "warn", "error"]; 230 | var Logger = class { 231 | config; 232 | debug; 233 | info; 234 | warn; 235 | error; 236 | constructor(prefix, logLevel) { 237 | const level = LEVEL.find((it) => it === logLevel) ?? "warn"; 238 | this.config = { 239 | prefix: prefix ?? "", 240 | level 241 | }; 242 | for (const m of LEVEL) { 243 | this[m] = (...data) => this.#write(m, ...data); 244 | } 245 | } 246 | #write(level, ...data) { 247 | const { level: configLevel, prefix } = this.config; 248 | if (LEVEL.indexOf(level) < LEVEL.indexOf(configLevel)) { 249 | return; 250 | } 251 | console[level](`${(/* @__PURE__ */ new Date()).toISOString()} ${level.toUpperCase()}${prefix ? 
` ${prefix}` : ""}`, ...data); 252 | } 253 | }; 254 | 255 | // node_modules/.deno/eventsource-parser@3.0.1/node_modules/eventsource-parser/dist/index.js 256 | var ParseError = class extends Error { 257 | constructor(message, options) { 258 | super(message), this.name = "ParseError", this.type = options.type, this.field = options.field, this.value = options.value, this.line = options.line; 259 | } 260 | }; 261 | function noop(_arg) { 262 | } 263 | function createParser(callbacks) { 264 | if (typeof callbacks == "function") 265 | throw new TypeError( 266 | "`callbacks` must be an object, got a function instead. Did you mean `{onEvent: fn}`?" 267 | ); 268 | const { onEvent = noop, onError = noop, onRetry = noop, onComment } = callbacks; 269 | let incompleteLine = "", isFirstChunk = true, id, data = "", eventType = ""; 270 | function feed(newChunk) { 271 | const chunk = isFirstChunk ? newChunk.replace(/^\xEF\xBB\xBF/, "") : newChunk, [complete, incomplete] = splitLines(`${incompleteLine}${chunk}`); 272 | for (const line of complete) 273 | parseLine(line); 274 | incompleteLine = incomplete, isFirstChunk = false; 275 | } 276 | function parseLine(line) { 277 | if (line === "") { 278 | dispatchEvent(); 279 | return; 280 | } 281 | if (line.startsWith(":")) { 282 | onComment && onComment(line.slice(line.startsWith(": ") ? 2 : 1)); 283 | return; 284 | } 285 | const fieldSeparatorIndex = line.indexOf(":"); 286 | if (fieldSeparatorIndex !== -1) { 287 | const field = line.slice(0, fieldSeparatorIndex), offset = line[fieldSeparatorIndex + 1] === " " ? 2 : 1, value = line.slice(fieldSeparatorIndex + offset); 288 | processField(field, value, line); 289 | return; 290 | } 291 | processField(line, "", line); 292 | } 293 | function processField(field, value, line) { 294 | switch (field) { 295 | case "event": 296 | eventType = value; 297 | break; 298 | case "data": 299 | data = `${data}${value} 300 | `; 301 | break; 302 | case "id": 303 | id = value.includes("\0") ? void 0 : value; 304 | break; 305 | case "retry": 306 | /^\d+$/.test(value) ? onRetry(parseInt(value, 10)) : onError( 307 | new ParseError(`Invalid \`retry\` value: "${value}"`, { 308 | type: "invalid-retry", 309 | value, 310 | line 311 | }) 312 | ); 313 | break; 314 | default: 315 | onError( 316 | new ParseError( 317 | `Unknown field "${field.length > 20 ? `${field.slice(0, 20)}\u2026` : field}"`, 318 | { type: "unknown-field", field, value, line } 319 | ) 320 | ); 321 | break; 322 | } 323 | } 324 | function dispatchEvent() { 325 | data.length > 0 && onEvent({ 326 | id, 327 | event: eventType || void 0, 328 | // If the data buffer's last character is a U+000A LINE FEED (LF) character, 329 | // then remove the last character from the data buffer. 330 | data: data.endsWith(` 331 | `) ? data.slice(0, -1) : data 332 | }), id = void 0, data = "", eventType = ""; 333 | } 334 | function reset(options = {}) { 335 | incompleteLine && options.consume && parseLine(incompleteLine), isFirstChunk = true, id = void 0, data = "", eventType = "", incompleteLine = ""; 336 | } 337 | return { feed, reset }; 338 | } 339 | function splitLines(chunk) { 340 | const lines = []; 341 | let incompleteLine = "", searchIndex = 0; 342 | for (; searchIndex < chunk.length; ) { 343 | const crIndex = chunk.indexOf("\r", searchIndex), lfIndex = chunk.indexOf(` 344 | `, searchIndex); 345 | let lineEnd = -1; 346 | if (crIndex !== -1 && lfIndex !== -1 ? lineEnd = Math.min(crIndex, lfIndex) : crIndex !== -1 ? 
lineEnd = crIndex : lfIndex !== -1 && (lineEnd = lfIndex), lineEnd === -1) { 347 | incompleteLine = chunk.slice(searchIndex); 348 | break; 349 | } else { 350 | const line = chunk.slice(searchIndex, lineEnd); 351 | lines.push(line), searchIndex = lineEnd + 1, chunk[searchIndex - 1] === "\r" && chunk[searchIndex] === ` 352 | ` && searchIndex++; 353 | } 354 | } 355 | return [lines, incompleteLine]; 356 | } 357 | 358 | // node_modules/.deno/eventsource-parser@3.0.1/node_modules/eventsource-parser/dist/stream.js 359 | var EventSourceParserStream = class extends TransformStream { 360 | constructor({ onError, onRetry, onComment } = {}) { 361 | let parser; 362 | super({ 363 | start(controller) { 364 | parser = createParser({ 365 | onEvent: (event) => { 366 | controller.enqueue(event); 367 | }, 368 | onError(error) { 369 | onError === "terminate" ? controller.error(error) : typeof onError == "function" && onError(error); 370 | }, 371 | onRetry, 372 | onComment 373 | }); 374 | }, 375 | transform(chunk) { 376 | parser.feed(chunk); 377 | } 378 | }); 379 | } 380 | }; 381 | 382 | // src/gemini-api-client/errors.ts 383 | var GoogleGenerativeAIError = class extends Error { 384 | constructor(message) { 385 | super(`[GoogleGenerativeAI Error]: ${message}`); 386 | } 387 | }; 388 | var GoogleGenerativeAIResponseError = class extends GoogleGenerativeAIError { 389 | response; 390 | constructor(message, response) { 391 | super(message); 392 | this.response = response; 393 | } 394 | }; 395 | 396 | // src/gemini-api-client/gemini-api-client.ts 397 | async function* streamGenerateContent(apiParam, model, params, requestOptions) { 398 | const response = await makeRequest( 399 | toURL({ model, task: "streamGenerateContent", stream: true, apiParam }), 400 | JSON.stringify(params), 401 | requestOptions 402 | ); 403 | const body = response.body; 404 | if (body == null) { 405 | return; 406 | } 407 | for await (const event of body.pipeThrough(new TextDecoderStream()).pipeThrough(new EventSourceParserStream())) { 408 | const responseJson = JSON.parse(event.data); 409 | yield responseJson; 410 | } 411 | } 412 | async function embedContent(apiParam, model, params, requestOptions) { 413 | const response = await makeRequest( 414 | toURL({ model, task: "embedContent", stream: false, apiParam }), 415 | JSON.stringify(params), 416 | requestOptions 417 | ); 418 | const body = response.body; 419 | if (body == null) { 420 | return; 421 | } 422 | const responseJson = await response.json(); 423 | return responseJson; 424 | } 425 | async function makeRequest(url, body, requestOptions) { 426 | let response; 427 | try { 428 | response = await fetch(url, { 429 | ...buildFetchOptions(requestOptions), 430 | method: "POST", 431 | headers: { 432 | "Content-Type": "application/json" 433 | }, 434 | body 435 | }); 436 | if (!response.ok) { 437 | let message = ""; 438 | try { 439 | const errResp = await response.json(); 440 | message = errResp.error?.message; 441 | if (errResp?.error?.details) { 442 | message += ` ${JSON.stringify(errResp.error.details)}`; 443 | } 444 | } catch (_e) { 445 | } 446 | throw new Error(`[${response.status} ${response.statusText}] ${message}`); 447 | } 448 | } catch (e2) { 449 | console.log(e2); 450 | const err = new GoogleGenerativeAIError(`Error fetching from google -> ${e2.message}`); 451 | err.stack = e2.stack; 452 | throw err; 453 | } 454 | return response; 455 | } 456 | function toURL({ 457 | model, 458 | task, 459 | stream, 460 | apiParam 461 | }) { 462 | const BASE_URL = 
"https://generativelanguage.googleapis.com"; 463 | const api_version = model.apiVersion(); 464 | const url = new URL(`${BASE_URL}/${api_version}/models/${model}:${task}`); 465 | url.searchParams.append("key", apiParam.apikey); 466 | if (stream) { 467 | url.searchParams.append("alt", "sse"); 468 | } 469 | return url; 470 | } 471 | function buildFetchOptions(requestOptions) { 472 | const fetchOptions = {}; 473 | if (requestOptions?.timeout) { 474 | const abortController = new AbortController(); 475 | const signal = abortController.signal; 476 | setTimeout(() => abortController.abort(), requestOptions.timeout); 477 | fetchOptions.signal = signal; 478 | } 479 | return fetchOptions; 480 | } 481 | 482 | // src/gemini-api-client/response-helper.ts 483 | function resultHelper(response) { 484 | if (response.candidates && response.candidates.length > 0) { 485 | if (response.candidates.length > 1) { 486 | console.warn( 487 | `This response had ${response.candidates.length} candidates. Returning text from the first candidate only. Access response.candidates directly to use the other candidates.` 488 | ); 489 | } 490 | if (hadBadFinishReason(response.candidates[0])) { 491 | throw new GoogleGenerativeAIResponseError( 492 | `${formatBlockErrorMessage(response)}`, 493 | response 494 | ); 495 | } 496 | return getText(response); 497 | } 498 | if (response.promptFeedback) { 499 | throw new GoogleGenerativeAIResponseError( 500 | `Text not available. ${formatBlockErrorMessage(response)}`, 501 | response 502 | ); 503 | } 504 | return ""; 505 | } 506 | function getText(response) { 507 | if (response.candidates?.[0].content?.parts?.[0]?.text) { 508 | return response.candidates[0].content.parts[0].text; 509 | } 510 | if (response.candidates?.[0].content?.parts?.[0]?.functionCall) { 511 | return response.candidates[0].content.parts[0].functionCall; 512 | } 513 | return ""; 514 | } 515 | var badFinishReasons = ["RECITATION", "SAFETY"]; 516 | function hadBadFinishReason(candidate) { 517 | return !!candidate.finishReason && badFinishReasons.includes(candidate.finishReason); 518 | } 519 | function formatBlockErrorMessage(response) { 520 | let message = ""; 521 | if ((!response.candidates || response.candidates.length === 0) && response.promptFeedback) { 522 | message += "Response was blocked"; 523 | if (response.promptFeedback?.blockReason) { 524 | message += ` due to ${response.promptFeedback.blockReason}`; 525 | } 526 | if (response.promptFeedback?.blockReasonMessage) { 527 | message += `: ${response.promptFeedback.blockReasonMessage}`; 528 | } 529 | } else if (response.candidates?.[0]) { 530 | const firstCandidate = response.candidates[0]; 531 | if (hadBadFinishReason(firstCandidate)) { 532 | message += `Candidate was blocked due to ${firstCandidate.finishReason}`; 533 | if (firstCandidate.finishMessage) { 534 | message += `: ${firstCandidate.finishMessage}`; 535 | } 536 | } 537 | } 538 | return message; 539 | } 540 | 541 | // src/openai/chat/completions/NonStreamingChatProxyHandler.ts 542 | async function nonStreamingChatProxyHandler(req, apiParam, log) { 543 | const [model, geminiReq] = genModel(req); 544 | let geminiResp = ""; 545 | try { 546 | for await (const it of streamGenerateContent(apiParam, model, geminiReq)) { 547 | const data = resultHelper(it); 548 | if (typeof data === "string") { 549 | geminiResp += data; 550 | } else { 551 | geminiResp = data; 552 | break; 553 | } 554 | } 555 | } catch (err) { 556 | log?.error(req); 557 | log?.error(err?.message ?? err.toString()); 558 | geminiResp = err?.message ?? 
err.toString(); 559 | } 560 | log?.debug(req); 561 | log?.debug(geminiResp); 562 | function genOpenAiResp(content) { 563 | if (typeof content === "string") { 564 | return { 565 | id: "chatcmpl-abc123", 566 | object: "chat.completion", 567 | created: Math.floor(Date.now() / 1e3), 568 | model: model.model, 569 | choices: [ 570 | { 571 | message: { role: "assistant", content, refusal: null }, 572 | finish_reason: "stop", 573 | index: 0, 574 | logprobs: null 575 | } 576 | ] 577 | }; 578 | } 579 | return { 580 | id: "chatcmpl-abc123", 581 | object: "chat.completion", 582 | created: Math.floor(Date.now() / 1e3), 583 | model: model.model, 584 | choices: [ 585 | { 586 | message: { 587 | role: "assistant", 588 | refusal: null, 589 | content: null, 590 | function_call: { 591 | name: content.name ?? "", 592 | arguments: JSON.stringify(content.args) 593 | } 594 | }, 595 | finish_reason: "function_call", 596 | index: 0, 597 | logprobs: null 598 | } 599 | ] 600 | }; 601 | } 602 | return Response.json(genOpenAiResp(geminiResp)); 603 | } 604 | 605 | // src/openai/chat/completions/StreamingChatProxyHandler.ts 606 | function streamingChatProxyHandler(req, apiParam, log) { 607 | const [model, geminiReq] = genModel(req); 608 | log?.debug("streamGenerateContent request", req); 609 | return sseResponse( 610 | async function* () { 611 | try { 612 | for await (const it of streamGenerateContent(apiParam, model, geminiReq)) { 613 | log?.debug("streamGenerateContent resp", it); 614 | const data = resultHelper(it); 615 | yield genStreamResp({ 616 | model: model.model, 617 | content: data, 618 | stop: false 619 | }); 620 | } 621 | } catch (error) { 622 | yield genStreamResp({ 623 | model: model.model, 624 | content: error?.message ?? error.toString(), 625 | stop: true 626 | }); 627 | } 628 | yield genStreamResp({ model: model.model, content: "", stop: true }); 629 | yield "[DONE]"; 630 | return void 0; 631 | }() 632 | ); 633 | } 634 | function genStreamResp({ 635 | model, 636 | content, 637 | stop 638 | }) { 639 | if (typeof content === "string") { 640 | return { 641 | id: "chatcmpl-abc123", 642 | object: "chat.completion.chunk", 643 | created: Math.floor(Date.now() / 1e3), 644 | model, 645 | choices: [ 646 | { 647 | delta: { role: "assistant", content }, 648 | finish_reason: stop ? "stop" : null, 649 | index: 0 650 | } 651 | ] 652 | }; 653 | } 654 | return { 655 | id: "chatcmpl-abc123", 656 | object: "chat.completion.chunk", 657 | created: Math.floor(Date.now() / 1e3), 658 | model, 659 | choices: [ 660 | { 661 | delta: { role: "assistant", function_call: content }, 662 | finish_reason: stop ? "function_call" : null, 663 | index: 0 664 | } 665 | ] 666 | }; 667 | } 668 | var encoder = new TextEncoder(); 669 | function sseResponse(dataStream) { 670 | const s = new ReadableStream({ 671 | async pull(controller) { 672 | const { value, done } = await dataStream.next(); 673 | if (done) { 674 | controller.close(); 675 | } else { 676 | const data = typeof value === "string" ? value : JSON.stringify(value); 677 | controller.enqueue(encoder.encode(toSseMsg({ data }))); 678 | } 679 | } 680 | }); 681 | const response = new Response(s, { 682 | status: 200, 683 | headers: new Headers({ 684 | "Content-Type": "text/event-stream" 685 | }) 686 | }); 687 | return response; 688 | } 689 | function toSseMsg({ event, data, id }) { 690 | let result = `data: ${data} 691 | `; 692 | if (event) { 693 | result += `event: ${event ?? ""} 694 | `; 695 | } 696 | if (id) { 697 | result += `id: ${id ?? 
""} 698 | `; 699 | } 700 | return `${result} 701 | `; 702 | } 703 | 704 | // src/openai/chat/completions/ChatProxyHandler.ts 705 | async function chatProxyHandler(rawReq) { 706 | const req = await rawReq.json(); 707 | const headers = rawReq.headers; 708 | const apiParam = getToken(headers); 709 | if (apiParam == null) { 710 | return new Response("Unauthorized", { status: 401 }); 711 | } 712 | if (req.stream !== true) { 713 | return await nonStreamingChatProxyHandler(req, apiParam, rawReq.logger); 714 | } 715 | return streamingChatProxyHandler(req, apiParam, rawReq.logger); 716 | } 717 | 718 | // src/openai/embeddingProxyHandler.ts 719 | async function embeddingProxyHandler(rawReq) { 720 | const req = await rawReq.json(); 721 | const log = rawReq.logger; 722 | const headers = rawReq.headers; 723 | const apiParam = getToken(headers); 724 | if (apiParam == null) { 725 | return new Response("Unauthorized", { status: 401 }); 726 | } 727 | const embedContentRequest = { 728 | model: "models/text-embedding-004", 729 | content: { 730 | parts: [req.input].flat().map((it) => ({ text: it.toString() })) 731 | } 732 | }; 733 | log?.warn("request", embedContentRequest); 734 | let geminiResp = []; 735 | try { 736 | const it = await embedContent(apiParam, new GeminiModel("text-embedding-004"), embedContentRequest); 737 | const data = it?.embedding?.values; 738 | geminiResp = data; 739 | } catch (err) { 740 | log?.error(req); 741 | log?.error(err?.message ?? err.toString()); 742 | geminiResp = err?.message ?? err.toString(); 743 | } 744 | log?.debug(req); 745 | log?.debug(geminiResp); 746 | const resp = { 747 | object: "list", 748 | data: [ 749 | { 750 | object: "embedding", 751 | index: 0, 752 | embedding: geminiResp ?? [] 753 | } 754 | ], 755 | model: req.model, 756 | usage: { 757 | prompt_tokens: 5, 758 | total_tokens: 5 759 | } 760 | }; 761 | return Response.json(resp); 762 | } 763 | 764 | // src/openai/models.ts 765 | var modelData = Object.keys(ModelMapping).map((model) => ({ 766 | created: 1677610602, 767 | object: "model", 768 | owned_by: "openai", 769 | id: model 770 | })); 771 | var models = () => { 772 | return { 773 | object: "list", 774 | data: modelData 775 | }; 776 | }; 777 | var modelDetail = (model) => { 778 | return modelData.find((it) => it.id === model); 779 | }; 780 | 781 | // src/app.ts 782 | var { preflight, corsify } = e({ allowHeaders: "*" }); 783 | var app = r({ 784 | before: [ 785 | preflight, 786 | (req) => { 787 | req.logger = new Logger(crypto.randomUUID().toString()); 788 | req.logger.warn(`--> ${req.method} ${req.url}`); 789 | } 790 | ], 791 | finally: [ 792 | corsify, 793 | (_, req) => { 794 | req.logger?.warn(`<-- ${req.method} ${req.url}`); 795 | } 796 | ] 797 | }); 798 | app.get("/", hello); 799 | app.post("/v1/chat/completions", chatProxyHandler); 800 | app.post("/v1/embeddings", embeddingProxyHandler); 801 | app.get("/v1/models", () => Response.json(models())); 802 | app.get("/v1/models/:model", (c) => Response.json(modelDetail(c.params.model))); 803 | app.post("/:model_version/models/:model_and_action", geminiProxy); 804 | app.all("*", () => new Response("Page Not Found", { status: 404 })); 805 | 806 | // main_deno.ts 807 | Deno.serve({ port: 8e3 }, app.fetch); 808 | -------------------------------------------------------------------------------- /dist/main_node.mjs: -------------------------------------------------------------------------------- 1 | // node_modules/.deno/@hono+node-server@1.14.0/node_modules/@hono/node-server/dist/index.mjs 2 | import { createServer 
as createServerHTTP } from "http"; 3 | import { Http2ServerRequest } from "http2"; 4 | import { Readable } from "stream"; 5 | import crypto2 from "crypto"; 6 | var RequestError = class extends Error { 7 | static name = "RequestError"; 8 | constructor(message, options) { 9 | super(message, options); 10 | } 11 | }; 12 | var toRequestError = (e2) => { 13 | if (e2 instanceof RequestError) { 14 | return e2; 15 | } 16 | return new RequestError(e2.message, { cause: e2 }); 17 | }; 18 | var GlobalRequest = global.Request; 19 | var Request2 = class extends GlobalRequest { 20 | constructor(input, options) { 21 | if (typeof input === "object" && getRequestCache in input) { 22 | input = input[getRequestCache](); 23 | } 24 | if (typeof options?.body?.getReader !== "undefined") { 25 | ; 26 | options.duplex ??= "half"; 27 | } 28 | super(input, options); 29 | } 30 | }; 31 | var newRequestFromIncoming = (method, url, incoming, abortController) => { 32 | const headerRecord = []; 33 | const rawHeaders = incoming.rawHeaders; 34 | for (let i = 0; i < rawHeaders.length; i += 2) { 35 | const { [i]: key, [i + 1]: value } = rawHeaders; 36 | if (key.charCodeAt(0) !== /*:*/ 37 | 58) { 38 | headerRecord.push([key, value]); 39 | } 40 | } 41 | const init = { 42 | method, 43 | headers: headerRecord, 44 | signal: abortController.signal 45 | }; 46 | if (method === "TRACE") { 47 | init.method = "GET"; 48 | const req = new Request2(url, init); 49 | Object.defineProperty(req, "method", { 50 | get() { 51 | return "TRACE"; 52 | } 53 | }); 54 | return req; 55 | } 56 | if (!(method === "GET" || method === "HEAD")) { 57 | if ("rawBody" in incoming && incoming.rawBody instanceof Buffer) { 58 | init.body = new ReadableStream({ 59 | start(controller) { 60 | controller.enqueue(incoming.rawBody); 61 | controller.close(); 62 | } 63 | }); 64 | } else { 65 | init.body = Readable.toWeb(incoming); 66 | } 67 | } 68 | return new Request2(url, init); 69 | }; 70 | var getRequestCache = Symbol("getRequestCache"); 71 | var requestCache = Symbol("requestCache"); 72 | var incomingKey = Symbol("incomingKey"); 73 | var urlKey = Symbol("urlKey"); 74 | var abortControllerKey = Symbol("abortControllerKey"); 75 | var getAbortController = Symbol("getAbortController"); 76 | var requestPrototype = { 77 | get method() { 78 | return this[incomingKey].method || "GET"; 79 | }, 80 | get url() { 81 | return this[urlKey]; 82 | }, 83 | [getAbortController]() { 84 | this[getRequestCache](); 85 | return this[abortControllerKey]; 86 | }, 87 | [getRequestCache]() { 88 | this[abortControllerKey] ||= new AbortController(); 89 | return this[requestCache] ||= newRequestFromIncoming( 90 | this.method, 91 | this[urlKey], 92 | this[incomingKey], 93 | this[abortControllerKey] 94 | ); 95 | } 96 | }; 97 | [ 98 | "body", 99 | "bodyUsed", 100 | "cache", 101 | "credentials", 102 | "destination", 103 | "headers", 104 | "integrity", 105 | "mode", 106 | "redirect", 107 | "referrer", 108 | "referrerPolicy", 109 | "signal", 110 | "keepalive" 111 | ].forEach((k) => { 112 | Object.defineProperty(requestPrototype, k, { 113 | get() { 114 | return this[getRequestCache]()[k]; 115 | } 116 | }); 117 | }); 118 | ["arrayBuffer", "blob", "clone", "formData", "json", "text"].forEach((k) => { 119 | Object.defineProperty(requestPrototype, k, { 120 | value: function() { 121 | return this[getRequestCache]()[k](); 122 | } 123 | }); 124 | }); 125 | Object.setPrototypeOf(requestPrototype, Request2.prototype); 126 | var newRequest = (incoming, defaultHostname) => { 127 | const req = 
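// newRequest() (being defined here) builds `req` lazily: the raw Node request is
// attached under a symbol and a real fetch Request is only materialized on first
// property access (sketch, not part of the bundle):
//   const req = newRequest(incoming, "localhost"); // cheap: no header/body copy yet
//   req.headers;                                   // now [getRequestCache]() runs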
Object.create(requestPrototype); 128 | req[incomingKey] = incoming; 129 | const incomingUrl = incoming.url || ""; 130 | if (incomingUrl[0] !== "/" && // short-circuit for performance. most requests are relative URL. 131 | (incomingUrl.startsWith("http://") || incomingUrl.startsWith("https://"))) { 132 | if (incoming instanceof Http2ServerRequest) { 133 | throw new RequestError("Absolute URL for :path is not allowed in HTTP/2"); 134 | } 135 | try { 136 | const url2 = new URL(incomingUrl); 137 | req[urlKey] = url2.href; 138 | } catch (e2) { 139 | throw new RequestError("Invalid absolute URL", { cause: e2 }); 140 | } 141 | return req; 142 | } 143 | const host = (incoming instanceof Http2ServerRequest ? incoming.authority : incoming.headers.host) || defaultHostname; 144 | if (!host) { 145 | throw new RequestError("Missing host header"); 146 | } 147 | let scheme; 148 | if (incoming instanceof Http2ServerRequest) { 149 | scheme = incoming.scheme; 150 | if (!(scheme === "http" || scheme === "https")) { 151 | throw new RequestError("Unsupported scheme"); 152 | } 153 | } else { 154 | scheme = incoming.socket && incoming.socket.encrypted ? "https" : "http"; 155 | } 156 | const url = new URL(`${scheme}://${host}${incomingUrl}`); 157 | if (url.hostname.length !== host.length && url.hostname !== host.replace(/:\d+$/, "")) { 158 | throw new RequestError("Invalid host header"); 159 | } 160 | req[urlKey] = url.href; 161 | return req; 162 | }; 163 | function writeFromReadableStream(stream, writable) { 164 | if (stream.locked) { 165 | throw new TypeError("ReadableStream is locked."); 166 | } else if (writable.destroyed) { 167 | stream.cancel(); 168 | return; 169 | } 170 | const reader = stream.getReader(); 171 | writable.on("close", cancel); 172 | writable.on("error", cancel); 173 | reader.read().then(flow, cancel); 174 | return reader.closed.finally(() => { 175 | writable.off("close", cancel); 176 | writable.off("error", cancel); 177 | }); 178 | function cancel(error) { 179 | reader.cancel(error).catch(() => { 180 | }); 181 | if (error) { 182 | writable.destroy(error); 183 | } 184 | } 185 | function onDrain() { 186 | reader.read().then(flow, cancel); 187 | } 188 | function flow({ done, value }) { 189 | try { 190 | if (done) { 191 | writable.end(); 192 | } else if (!writable.write(value)) { 193 | writable.once("drain", onDrain); 194 | } else { 195 | return reader.read().then(flow, cancel); 196 | } 197 | } catch (e2) { 198 | cancel(e2); 199 | } 200 | } 201 | } 202 | var buildOutgoingHttpHeaders = (headers) => { 203 | const res = {}; 204 | if (!(headers instanceof Headers)) { 205 | headers = new Headers(headers ?? 
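// writeFromReadableStream() above applies manual backpressure: when write()
// returns false it waits for "drain" before pulling the next chunk (sketch;
// `outgoing` is a hypothetical Node ServerResponse):
//   await writeFromReadableStream(new Response("x".repeat(1e6)).body, outgoing);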
void 0); 206 | } 207 | const cookies = []; 208 | for (const [k, v] of headers) { 209 | if (k === "set-cookie") { 210 | cookies.push(v); 211 | } else { 212 | res[k] = v; 213 | } 214 | } 215 | if (cookies.length > 0) { 216 | res["set-cookie"] = cookies; 217 | } 218 | res["content-type"] ??= "text/plain; charset=UTF-8"; 219 | return res; 220 | }; 221 | var responseCache = Symbol("responseCache"); 222 | var getResponseCache = Symbol("getResponseCache"); 223 | var cacheKey = Symbol("cache"); 224 | var GlobalResponse = global.Response; 225 | var Response2 = class _Response { 226 | #body; 227 | #init; 228 | [getResponseCache]() { 229 | delete this[cacheKey]; 230 | return this[responseCache] ||= new GlobalResponse(this.#body, this.#init); 231 | } 232 | constructor(body, init) { 233 | this.#body = body; 234 | if (init instanceof _Response) { 235 | const cachedGlobalResponse = init[responseCache]; 236 | if (cachedGlobalResponse) { 237 | this.#init = cachedGlobalResponse; 238 | this[getResponseCache](); 239 | return; 240 | } else { 241 | this.#init = init.#init; 242 | } 243 | } else { 244 | this.#init = init; 245 | } 246 | if (typeof body === "string" || typeof body?.getReader !== "undefined") { 247 | let headers = init?.headers || { "content-type": "text/plain; charset=UTF-8" }; 248 | if (headers instanceof Headers) { 249 | headers = buildOutgoingHttpHeaders(headers); 250 | } 251 | ; 252 | this[cacheKey] = [init?.status || 200, body, headers]; 253 | } 254 | } 255 | }; 256 | [ 257 | "body", 258 | "bodyUsed", 259 | "headers", 260 | "ok", 261 | "redirected", 262 | "status", 263 | "statusText", 264 | "trailers", 265 | "type", 266 | "url" 267 | ].forEach((k) => { 268 | Object.defineProperty(Response2.prototype, k, { 269 | get() { 270 | return this[getResponseCache]()[k]; 271 | } 272 | }); 273 | }); 274 | ["arrayBuffer", "blob", "clone", "formData", "json", "text"].forEach((k) => { 275 | Object.defineProperty(Response2.prototype, k, { 276 | value: function() { 277 | return this[getResponseCache]()[k](); 278 | } 279 | }); 280 | }); 281 | Object.setPrototypeOf(Response2, GlobalResponse); 282 | Object.setPrototypeOf(Response2.prototype, GlobalResponse.prototype); 283 | var stateKey = Reflect.ownKeys(new GlobalResponse()).find( 284 | (k) => typeof k === "symbol" && k.toString() === "Symbol(state)" 285 | ); 286 | if (!stateKey) { 287 | console.warn("Failed to find Response internal state key"); 288 | } 289 | function getInternalBody(response) { 290 | if (!stateKey) { 291 | return; 292 | } 293 | if (response instanceof Response2) { 294 | response = response[getResponseCache](); 295 | } 296 | const state = response[stateKey]; 297 | return state && state.body || void 0; 298 | } 299 | var X_ALREADY_SENT = "x-hono-already-sent"; 300 | var webFetch = global.fetch; 301 | if (typeof global.crypto === "undefined") { 302 | global.crypto = crypto2; 303 | } 304 | global.fetch = (info, init) => { 305 | init = { 306 | // Disable compression handling so people can return the result of a fetch 307 | // directly in the loader without messing with the Content-Encoding header. 308 | compress: false, 309 | ...init 310 | }; 311 | return webFetch(info, init); 312 | }; 313 | var regBuffer = /^no$/i; 314 | var regContentType = /^(application\/json\b|text\/(?!event-stream\b))/i; 315 | var handleRequestError = () => new Response(null, { 316 | status: 400 317 | }); 318 | var handleFetchError = (e2) => new Response(null, { 319 | status: e2 instanceof Error && (e2.name === "TimeoutError" || e2.constructor.name === "TimeoutError") ? 
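// The global fetch patch above sets compress: false so a proxied upstream body
// can be returned verbatim without a stale Content-Encoding header, e.g.
// (sketch; `target` is hypothetical):
//   const upstream = await fetch(target);         // body stays exactly as sent
//   return new Response(upstream.body, upstream); // headers still match the body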
504 : 500 320 | }); 321 | var handleResponseError = (e2, outgoing) => { 322 | const err = e2 instanceof Error ? e2 : new Error("unknown error", { cause: e2 }); 323 | if (err.code === "ERR_STREAM_PREMATURE_CLOSE") { 324 | console.info("The user aborted a request."); 325 | } else { 326 | console.error(e2); 327 | if (!outgoing.headersSent) { 328 | outgoing.writeHead(500, { "Content-Type": "text/plain" }); 329 | } 330 | outgoing.end(`Error: ${err.message}`); 331 | outgoing.destroy(err); 332 | } 333 | }; 334 | var responseViaCache = (res, outgoing) => { 335 | const [status, body, header] = res[cacheKey]; 336 | if (typeof body === "string") { 337 | header["Content-Length"] = Buffer.byteLength(body); 338 | outgoing.writeHead(status, header); 339 | outgoing.end(body); 340 | } else { 341 | outgoing.writeHead(status, header); 342 | return writeFromReadableStream(body, outgoing)?.catch( 343 | (e2) => handleResponseError(e2, outgoing) 344 | ); 345 | } 346 | }; 347 | var responseViaResponseObject = async (res, outgoing, options = {}) => { 348 | if (res instanceof Promise) { 349 | if (options.errorHandler) { 350 | try { 351 | res = await res; 352 | } catch (err) { 353 | const errRes = await options.errorHandler(err); 354 | if (!errRes) { 355 | return; 356 | } 357 | res = errRes; 358 | } 359 | } else { 360 | res = await res.catch(handleFetchError); 361 | } 362 | } 363 | if (cacheKey in res) { 364 | return responseViaCache(res, outgoing); 365 | } 366 | const resHeaderRecord = buildOutgoingHttpHeaders(res.headers); 367 | const internalBody = getInternalBody(res); 368 | if (internalBody) { 369 | const { length, source, stream } = internalBody; 370 | if (source instanceof Uint8Array && source.byteLength !== length) { 371 | } else { 372 | if (length) { 373 | resHeaderRecord["content-length"] = length; 374 | } 375 | outgoing.writeHead(res.status, resHeaderRecord); 376 | if (typeof source === "string" || source instanceof Uint8Array) { 377 | outgoing.end(source); 378 | } else if (source instanceof Blob) { 379 | outgoing.end(new Uint8Array(await source.arrayBuffer())); 380 | } else { 381 | await writeFromReadableStream(stream, outgoing); 382 | } 383 | return; 384 | } 385 | } 386 | if (res.body) { 387 | const { 388 | "transfer-encoding": transferEncoding, 389 | "content-encoding": contentEncoding, 390 | "content-length": contentLength, 391 | "x-accel-buffering": accelBuffering, 392 | "content-type": contentType 393 | } = resHeaderRecord; 394 | if (transferEncoding || contentEncoding || contentLength || // nginx buffering variant 395 | accelBuffering && regBuffer.test(accelBuffering) || !regContentType.test(contentType)) { 396 | outgoing.writeHead(res.status, resHeaderRecord); 397 | await writeFromReadableStream(res.body, outgoing); 398 | } else { 399 | const buffer = await res.arrayBuffer(); 400 | resHeaderRecord["content-length"] = buffer.byteLength; 401 | outgoing.writeHead(res.status, resHeaderRecord); 402 | outgoing.end(new Uint8Array(buffer)); 403 | } 404 | } else if (resHeaderRecord[X_ALREADY_SENT]) { 405 | } else { 406 | outgoing.writeHead(res.status, resHeaderRecord); 407 | outgoing.end(); 408 | } 409 | }; 410 | var getRequestListener = (fetchCallback, options = {}) => { 411 | if (options.overrideGlobalObjects !== false && global.Request !== Request2) { 412 | Object.defineProperty(global, "Request", { 413 | value: Request2 414 | }); 415 | Object.defineProperty(global, "Response", { 416 | value: Response2 417 | }); 418 | } 419 | return async (incoming, outgoing) => { 420 | let res, req; 421 | try { 422 | 
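// Recap of responseViaResponseObject() above: a body is streamed when
// transfer-encoding/content-encoding/content-length is present, when
// x-accel-buffering matches /^no$/i, or when the content-type fails
// regContentType; otherwise it is buffered so an exact content-length is sent:
//   regContentType.test("text/event-stream"); // false -> streamed, never buffered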
req = newRequest(incoming, options.hostname); 423 | outgoing.on("close", () => { 424 | const abortController = req[abortControllerKey]; 425 | if (!abortController) { 426 | return; 427 | } 428 | if (incoming.errored) { 429 | req[abortControllerKey].abort(incoming.errored.toString()); 430 | } else if (!outgoing.writableFinished) { 431 | req[abortControllerKey].abort("Client connection prematurely closed."); 432 | } 433 | }); 434 | res = fetchCallback(req, { incoming, outgoing }); 435 | if (cacheKey in res) { 436 | return responseViaCache(res, outgoing); 437 | } 438 | } catch (e2) { 439 | if (!res) { 440 | if (options.errorHandler) { 441 | res = await options.errorHandler(req ? e2 : toRequestError(e2)); 442 | if (!res) { 443 | return; 444 | } 445 | } else if (!req) { 446 | res = handleRequestError(); 447 | } else { 448 | res = handleFetchError(e2); 449 | } 450 | } else { 451 | return handleResponseError(e2, outgoing); 452 | } 453 | } 454 | try { 455 | return responseViaResponseObject(res, outgoing, options); 456 | } catch (e2) { 457 | return handleResponseError(e2, outgoing); 458 | } 459 | }; 460 | }; 461 | var createAdaptorServer = (options) => { 462 | const fetchCallback = options.fetch; 463 | const requestListener = getRequestListener(fetchCallback, { 464 | hostname: options.hostname, 465 | overrideGlobalObjects: options.overrideGlobalObjects 466 | }); 467 | const createServer = options.createServer || createServerHTTP; 468 | const server = createServer(options.serverOptions || {}, requestListener); 469 | return server; 470 | }; 471 | var serve = (options, listeningListener) => { 472 | const server = createAdaptorServer(options); 473 | server.listen(options?.port ?? 3e3, options.hostname, () => { 474 | const serverInfo = server.address(); 475 | listeningListener && listeningListener(serverInfo); 476 | }); 477 | return server; 478 | }; 479 | 480 | // node_modules/.deno/itty-router@5.0.18/node_modules/itty-router/Router.mjs 481 | var r = ({ base: r2 = "", routes: e2 = [], ...a } = {}) => ({ __proto__: new Proxy({}, { get: (a2, t, o, c) => (a3, ...l) => e2.push([t.toUpperCase?.(), RegExp(`^${(c = (r2 + a3).replace(/\/+(\/|$)/g, "$1")).replace(/(\/?\.?):(\w+)\+/g, "($1(?<$2>*))").replace(/(\/?\.?):(\w+)/g, "($1(?<$2>[^$1/]+?))").replace(/\./g, "\\.").replace(/(\/?)\*/g, "($1.*)?")}/*$`), l, c]) && o }), routes: e2, ...a, async fetch(r3, ...t) { 482 | let o, c, l = new URL(r3.url), p = r3.query = { __proto__: null }; 483 | for (let [r4, e3] of l.searchParams) p[r4] = p[r4] ? [].concat(p[r4], e3) : e3; 484 | r: try { 485 | for (let e3 of a.before || []) if (null != (o = await e3(r3.proxy ?? r3, ...t))) break r; 486 | e: for (let [a2, p2, f, h] of e2) if ((a2 == r3.method || "ALL" == a2) && (c = l.pathname.match(p2))) { 487 | r3.params = c.groups || {}, r3.route = h; 488 | for (let e3 of f) if (null != (o = await e3(r3.proxy ?? r3, ...t))) break e; 489 | } 490 | } catch (e3) { 491 | if (!a.catch) throw e3; 492 | o = await a.catch(e3, r3.proxy ?? r3, ...t); 493 | } 494 | try { 495 | for (let e3 of a.finally || []) o = await e3(o, r3.proxy ?? r3, ...t) ?? o; 496 | } catch (e3) { 497 | if (!a.catch) throw e3; 498 | o = await a.catch(e3, r3.proxy ?? 
r3, ...t); 499 | } 500 | return o; 501 | } }); 502 | 503 | // node_modules/.deno/itty-router@5.0.18/node_modules/itty-router/cors.mjs 504 | var e = (e2 = {}) => { 505 | const { origin: o = "*", credentials: s = false, allowMethods: c = "*", allowHeaders: r2, exposeHeaders: n, maxAge: t } = e2, a = (e3) => { 506 | const c2 = e3?.headers.get("origin"); 507 | return true === o ? c2 : o instanceof RegExp ? o.test(c2) ? c2 : void 0 : Array.isArray(o) ? o.includes(c2) ? c2 : void 0 : o instanceof Function ? o(c2) : "*" == o && s ? c2 : o; 508 | }, l = (e3, o2) => { 509 | for (const [s2, c2] of Object.entries(o2)) c2 && e3.headers.append(s2, c2); 510 | return e3; 511 | }; 512 | return { corsify: (e3, o2) => e3?.headers?.get("access-control-allow-origin") || 101 == e3.status ? e3 : l(e3.clone(), { "access-control-allow-origin": a(o2), "access-control-allow-credentials": s }), preflight: (e3) => { 513 | if ("OPTIONS" == e3.method) { 514 | const o2 = new Response(null, { status: 204 }); 515 | return l(o2, { "access-control-allow-origin": a(e3), "access-control-allow-methods": c?.join?.(",") ?? c, "access-control-expose-headers": n?.join?.(",") ?? n, "access-control-allow-headers": r2?.join?.(",") ?? r2 ?? e3.headers.get("access-control-request-headers"), "access-control-max-age": t, "access-control-allow-credentials": s }); 516 | } 517 | } }; 518 | }; 519 | 520 | // src/gemini-proxy.ts 521 | async function geminiProxy(rawReq) { 522 | const url = new URL(rawReq.url); 523 | url.host = "generativelanguage.googleapis.com"; 524 | url.port = ""; 525 | url.protocol = "https:"; 526 | const req = new Request(url, rawReq); 527 | const resp = await fetch(req); 528 | return new Response(resp.body, resp); 529 | } 530 | 531 | // src/utils.ts 532 | function getToken(headers) { 533 | for (const [k, v] of headers) { 534 | if (k.toLowerCase() !== "authorization") continue; 535 | const rawApikey = v.substring(v.indexOf(" ") + 1); 536 | if (!rawApikey.includes("#")) { 537 | return { 538 | apikey: rawApikey, 539 | useBeta: false 540 | }; 541 | } 542 | const apikey = rawApikey.substring(0, rawApikey.indexOf("#")); 543 | const params = new URLSearchParams(rawApikey.substring(rawApikey.indexOf("#") + 1)); 544 | return { 545 | apikey, 546 | useBeta: params.has("useBeta") 547 | }; 548 | } 549 | return null; 550 | } 551 | function parseBase64(base64) { 552 | if (!base64.startsWith("data:")) { 553 | return { text: "" }; 554 | } 555 | const [m, data, ..._arr] = base64.split(","); 556 | const mimeType = m.match(/:(?<mime>.*?);/)?.groups?.mime ?? "img/png"; 557 | return { 558 | inlineData: { 559 | mimeType, 560 | data 561 | } 562 | }; 563 | } 564 | function openAiMessageToGeminiMessage(messages) { 565 | const result = messages.flatMap(({ role, content }) => { 566 | if (role === "system") { 567 | return [ 568 | { 569 | role: "user", 570 | parts: typeof content !== "string" ? content : [{ text: content }] 571 | } 572 | ]; 573 | } 574 | const parts = content == null || typeof content === "string" ? [{ text: content?.toString() ?? "" }] : content.map((item) => { 575 | if (item.type === "text") return { text: item.text }; 576 | if (item.type === "image_url") return parseBase64(item.image_url.url); 577 | return { text: "OK" }; 578 | }); 579 | return [{ role: "user" === role ? "user" : "model", parts }]; 580 | }); 581 | return result; 582 | } 583 | function genModel(req) { 584 | const model = GeminiModel.modelMapping(req.model); 585 | let functions = req.tools?.filter((it) => it.type === "function")?.map((it) => it.function) ??
[]; 586 | functions = functions.concat((req.functions ?? []).map((it) => ({ strict: null, ...it }))); 587 | const [responseMimeType, responseSchema] = (() => { 588 | switch (req.response_format?.type) { 589 | case "json_object": 590 | return ["application/json", void 0]; 591 | case "json_schema": 592 | return ["application/json", req.response_format.json_schema.schema]; 593 | case "text": 594 | return ["text/plain", void 0]; 595 | default: 596 | return [void 0, void 0]; 597 | } 598 | })(); 599 | const generateContentRequest = { 600 | contents: openAiMessageToGeminiMessage(req.messages), 601 | generationConfig: { 602 | maxOutputTokens: req.max_completion_tokens ?? void 0, 603 | temperature: req.temperature ?? void 0, 604 | topP: req.top_p ?? void 0, 605 | responseMimeType, 606 | responseSchema, 607 | thinkingConfig: !model.isThinkingModel() ? void 0 : { 608 | includeThoughts: true 609 | } 610 | }, 611 | tools: functions.length === 0 ? void 0 : [ 612 | { 613 | functionDeclarations: functions 614 | } 615 | ], 616 | safetySettings: [ 617 | "HARM_CATEGORY_HATE_SPEECH", 618 | "HARM_CATEGORY_SEXUALLY_EXPLICIT", 619 | "HARM_CATEGORY_DANGEROUS_CONTENT", 620 | "HARM_CATEGORY_HARASSMENT" 621 | ].map((category) => ({ 622 | category, 623 | threshold: "BLOCK_NONE" 624 | })) 625 | }; 626 | return [model, generateContentRequest]; 627 | } 628 | var GeminiModel = class _GeminiModel { 629 | static modelMapping(model) { 630 | const modelName = ModelMapping[model ?? ""] ?? _GeminiModel.defaultModel(model ?? ""); 631 | return new _GeminiModel(modelName); 632 | } 633 | model; 634 | constructor(model) { 635 | this.model = model; 636 | } 637 | isThinkingModel() { 638 | return this.model.includes("thinking"); 639 | } 640 | apiVersion() { 641 | if (this.isThinkingModel()) { 642 | return "v1alpha"; 643 | } 644 | return "v1beta"; 645 | } 646 | toString() { 647 | return this.model; 648 | } 649 | static defaultModel(m) { 650 | if (m.startsWith("gemini")) { 651 | return m; 652 | } 653 | return "gemini-1.5-flash-latest"; 654 | } 655 | }; 656 | var ModelMapping = { 657 | "gpt-3.5-turbo": "gemini-1.5-flash-8b-latest", 658 | "gpt-4": "gemini-1.5-pro-latest", 659 | "gpt-4o": "gemini-1.5-flash-latest", 660 | "gpt-4o-mini": "gemini-1.5-flash-8b-latest", 661 | "gpt-4-vision-preview": "gemini-1.5-flash-latest", 662 | "gpt-4-turbo": "gemini-1.5-pro-latest", 663 | "gpt-4-turbo-preview": "gemini-2.0-flash-exp" 664 | }; 665 | function getRuntimeKey() { 666 | const global2 = globalThis; 667 | if (global2?.Deno !== void 0) { 668 | return "deno"; 669 | } 670 | if (global2?.Bun !== void 0) { 671 | return "bun"; 672 | } 673 | if (typeof global2?.WebSocketPair === "function") { 674 | return "workerd"; 675 | } 676 | if (typeof global2?.EdgeRuntime === "string") { 677 | return "edge-light"; 678 | } 679 | if (global2?.fastly !== void 0) { 680 | return "fastly"; 681 | } 682 | if (global2?.process?.release?.name === "node") { 683 | return "node"; 684 | } 685 | return "other"; 686 | } 687 | 688 | // src/hello.ts 689 | function hello(req) { 690 | const origin = new URL(req.url).origin; 691 | return new Response(` 692 | Hello Gemini-OpenAI-Proxy from ${getRuntimeKey()}! 
693 | 694 | You can try it with: 695 | 696 | curl ${origin}/v1/chat/completions \\ 697 | -H "Authorization: Bearer $YOUR_GEMINI_API_KEY" \\ 698 | -H "Content-Type: application/json" \\ 699 | -d '{ 700 | "model": "gpt-3.5-turbo", 701 | "messages": [{"role": "user", "content": "Hello"}], 702 | "temperature": 0.7 703 | }' 704 | `); 705 | } 706 | 707 | // src/log.ts 708 | var LEVEL = ["debug", "info", "warn", "error"]; 709 | var Logger = class { 710 | config; 711 | debug; 712 | info; 713 | warn; 714 | error; 715 | constructor(prefix, logLevel) { 716 | const level = LEVEL.find((it) => it === logLevel) ?? "warn"; 717 | this.config = { 718 | prefix: prefix ?? "", 719 | level 720 | }; 721 | for (const m of LEVEL) { 722 | this[m] = (...data) => this.#write(m, ...data); 723 | } 724 | } 725 | #write(level, ...data) { 726 | const { level: configLevel, prefix } = this.config; 727 | if (LEVEL.indexOf(level) < LEVEL.indexOf(configLevel)) { 728 | return; 729 | } 730 | console[level](`${(/* @__PURE__ */ new Date()).toISOString()} ${level.toUpperCase()}${prefix ? ` ${prefix}` : ""}`, ...data); 731 | } 732 | }; 733 | 734 | // node_modules/.deno/eventsource-parser@3.0.1/node_modules/eventsource-parser/dist/index.js 735 | var ParseError = class extends Error { 736 | constructor(message, options) { 737 | super(message), this.name = "ParseError", this.type = options.type, this.field = options.field, this.value = options.value, this.line = options.line; 738 | } 739 | }; 740 | function noop(_arg) { 741 | } 742 | function createParser(callbacks) { 743 | if (typeof callbacks == "function") 744 | throw new TypeError( 745 | "`callbacks` must be an object, got a function instead. Did you mean `{onEvent: fn}`?" 746 | ); 747 | const { onEvent = noop, onError = noop, onRetry = noop, onComment } = callbacks; 748 | let incompleteLine = "", isFirstChunk = true, id, data = "", eventType = ""; 749 | function feed(newChunk) { 750 | const chunk = isFirstChunk ? newChunk.replace(/^\xEF\xBB\xBF/, "") : newChunk, [complete, incomplete] = splitLines(`${incompleteLine}${chunk}`); 751 | for (const line of complete) 752 | parseLine(line); 753 | incompleteLine = incomplete, isFirstChunk = false; 754 | } 755 | function parseLine(line) { 756 | if (line === "") { 757 | dispatchEvent(); 758 | return; 759 | } 760 | if (line.startsWith(":")) { 761 | onComment && onComment(line.slice(line.startsWith(": ") ? 2 : 1)); 762 | return; 763 | } 764 | const fieldSeparatorIndex = line.indexOf(":"); 765 | if (fieldSeparatorIndex !== -1) { 766 | const field = line.slice(0, fieldSeparatorIndex), offset = line[fieldSeparatorIndex + 1] === " " ? 2 : 1, value = line.slice(fieldSeparatorIndex + offset); 767 | processField(field, value, line); 768 | return; 769 | } 770 | processField(line, "", line); 771 | } 772 | function processField(field, value, line) { 773 | switch (field) { 774 | case "event": 775 | eventType = value; 776 | break; 777 | case "data": 778 | data = `${data}${value} 779 | `; 780 | break; 781 | case "id": 782 | id = value.includes("\0") ? void 0 : value; 783 | break; 784 | case "retry": 785 | /^\d+$/.test(value) ? onRetry(parseInt(value, 10)) : onError( 786 | new ParseError(`Invalid \`retry\` value: "${value}"`, { 787 | type: "invalid-retry", 788 | value, 789 | line 790 | }) 791 | ); 792 | break; 793 | default: 794 | onError( 795 | new ParseError( 796 | `Unknown field "${field.length > 20 ? 
`${field.slice(0, 20)}\u2026` : field}"`, 797 | { type: "unknown-field", field, value, line } 798 | ) 799 | ); 800 | break; 801 | } 802 | } 803 | function dispatchEvent() { 804 | data.length > 0 && onEvent({ 805 | id, 806 | event: eventType || void 0, 807 | // If the data buffer's last character is a U+000A LINE FEED (LF) character, 808 | // then remove the last character from the data buffer. 809 | data: data.endsWith(` 810 | `) ? data.slice(0, -1) : data 811 | }), id = void 0, data = "", eventType = ""; 812 | } 813 | function reset(options = {}) { 814 | incompleteLine && options.consume && parseLine(incompleteLine), isFirstChunk = true, id = void 0, data = "", eventType = "", incompleteLine = ""; 815 | } 816 | return { feed, reset }; 817 | } 818 | function splitLines(chunk) { 819 | const lines = []; 820 | let incompleteLine = "", searchIndex = 0; 821 | for (; searchIndex < chunk.length; ) { 822 | const crIndex = chunk.indexOf("\r", searchIndex), lfIndex = chunk.indexOf(` 823 | `, searchIndex); 824 | let lineEnd = -1; 825 | if (crIndex !== -1 && lfIndex !== -1 ? lineEnd = Math.min(crIndex, lfIndex) : crIndex !== -1 ? lineEnd = crIndex : lfIndex !== -1 && (lineEnd = lfIndex), lineEnd === -1) { 826 | incompleteLine = chunk.slice(searchIndex); 827 | break; 828 | } else { 829 | const line = chunk.slice(searchIndex, lineEnd); 830 | lines.push(line), searchIndex = lineEnd + 1, chunk[searchIndex - 1] === "\r" && chunk[searchIndex] === ` 831 | ` && searchIndex++; 832 | } 833 | } 834 | return [lines, incompleteLine]; 835 | } 836 | 837 | // node_modules/.deno/eventsource-parser@3.0.1/node_modules/eventsource-parser/dist/stream.js 838 | var EventSourceParserStream = class extends TransformStream { 839 | constructor({ onError, onRetry, onComment } = {}) { 840 | let parser; 841 | super({ 842 | start(controller) { 843 | parser = createParser({ 844 | onEvent: (event) => { 845 | controller.enqueue(event); 846 | }, 847 | onError(error) { 848 | onError === "terminate" ? 
controller.error(error) : typeof onError == "function" && onError(error); 849 | }, 850 | onRetry, 851 | onComment 852 | }); 853 | }, 854 | transform(chunk) { 855 | parser.feed(chunk); 856 | } 857 | }); 858 | } 859 | }; 860 | 861 | // src/gemini-api-client/errors.ts 862 | var GoogleGenerativeAIError = class extends Error { 863 | constructor(message) { 864 | super(`[GoogleGenerativeAI Error]: ${message}`); 865 | } 866 | }; 867 | var GoogleGenerativeAIResponseError = class extends GoogleGenerativeAIError { 868 | response; 869 | constructor(message, response) { 870 | super(message); 871 | this.response = response; 872 | } 873 | }; 874 | 875 | // src/gemini-api-client/gemini-api-client.ts 876 | async function* streamGenerateContent(apiParam, model, params, requestOptions) { 877 | const response = await makeRequest( 878 | toURL({ model, task: "streamGenerateContent", stream: true, apiParam }), 879 | JSON.stringify(params), 880 | requestOptions 881 | ); 882 | const body = response.body; 883 | if (body == null) { 884 | return; 885 | } 886 | for await (const event of body.pipeThrough(new TextDecoderStream()).pipeThrough(new EventSourceParserStream())) { 887 | const responseJson = JSON.parse(event.data); 888 | yield responseJson; 889 | } 890 | } 891 | async function embedContent(apiParam, model, params, requestOptions) { 892 | const response = await makeRequest( 893 | toURL({ model, task: "embedContent", stream: false, apiParam }), 894 | JSON.stringify(params), 895 | requestOptions 896 | ); 897 | const body = response.body; 898 | if (body == null) { 899 | return; 900 | } 901 | const responseJson = await response.json(); 902 | return responseJson; 903 | } 904 | async function makeRequest(url, body, requestOptions) { 905 | let response; 906 | try { 907 | response = await fetch(url, { 908 | ...buildFetchOptions(requestOptions), 909 | method: "POST", 910 | headers: { 911 | "Content-Type": "application/json" 912 | }, 913 | body 914 | }); 915 | if (!response.ok) { 916 | let message = ""; 917 | try { 918 | const errResp = await response.json(); 919 | message = errResp.error?.message; 920 | if (errResp?.error?.details) { 921 | message += ` ${JSON.stringify(errResp.error.details)}`; 922 | } 923 | } catch (_e) { 924 | } 925 | throw new Error(`[${response.status} ${response.statusText}] ${message}`); 926 | } 927 | } catch (e2) { 928 | console.log(e2); 929 | const err = new GoogleGenerativeAIError(`Error fetching from google -> ${e2.message}`); 930 | err.stack = e2.stack; 931 | throw err; 932 | } 933 | return response; 934 | } 935 | function toURL({ 936 | model, 937 | task, 938 | stream, 939 | apiParam 940 | }) { 941 | const BASE_URL = "https://generativelanguage.googleapis.com"; 942 | const api_version = model.apiVersion(); 943 | const url = new URL(`${BASE_URL}/${api_version}/models/${model}:${task}`); 944 | url.searchParams.append("key", apiParam.apikey); 945 | if (stream) { 946 | url.searchParams.append("alt", "sse"); 947 | } 948 | return url; 949 | } 950 | function buildFetchOptions(requestOptions) { 951 | const fetchOptions = {}; 952 | if (requestOptions?.timeout) { 953 | const abortController = new AbortController(); 954 | const signal = abortController.signal; 955 | setTimeout(() => abortController.abort(), requestOptions.timeout); 956 | fetchOptions.signal = signal; 957 | } 958 | return fetchOptions; 959 | } 960 | 961 | // src/gemini-api-client/response-helper.ts 962 | function resultHelper(response) { 963 | if (response.candidates && response.candidates.length > 0) { 964 | if 
(response.candidates.length > 1) { 965 | console.warn( 966 | `This response had ${response.candidates.length} candidates. Returning text from the first candidate only. Access response.candidates directly to use the other candidates.` 967 | ); 968 | } 969 | if (hadBadFinishReason(response.candidates[0])) { 970 | throw new GoogleGenerativeAIResponseError( 971 | `${formatBlockErrorMessage(response)}`, 972 | response 973 | ); 974 | } 975 | return getText(response); 976 | } 977 | if (response.promptFeedback) { 978 | throw new GoogleGenerativeAIResponseError( 979 | `Text not available. ${formatBlockErrorMessage(response)}`, 980 | response 981 | ); 982 | } 983 | return ""; 984 | } 985 | function getText(response) { 986 | if (response.candidates?.[0].content?.parts?.[0]?.text) { 987 | return response.candidates[0].content.parts[0].text; 988 | } 989 | if (response.candidates?.[0].content?.parts?.[0]?.functionCall) { 990 | return response.candidates[0].content.parts[0].functionCall; 991 | } 992 | return ""; 993 | } 994 | var badFinishReasons = ["RECITATION", "SAFETY"]; 995 | function hadBadFinishReason(candidate) { 996 | return !!candidate.finishReason && badFinishReasons.includes(candidate.finishReason); 997 | } 998 | function formatBlockErrorMessage(response) { 999 | let message = ""; 1000 | if ((!response.candidates || response.candidates.length === 0) && response.promptFeedback) { 1001 | message += "Response was blocked"; 1002 | if (response.promptFeedback?.blockReason) { 1003 | message += ` due to ${response.promptFeedback.blockReason}`; 1004 | } 1005 | if (response.promptFeedback?.blockReasonMessage) { 1006 | message += `: ${response.promptFeedback.blockReasonMessage}`; 1007 | } 1008 | } else if (response.candidates?.[0]) { 1009 | const firstCandidate = response.candidates[0]; 1010 | if (hadBadFinishReason(firstCandidate)) { 1011 | message += `Candidate was blocked due to ${firstCandidate.finishReason}`; 1012 | if (firstCandidate.finishMessage) { 1013 | message += `: ${firstCandidate.finishMessage}`; 1014 | } 1015 | } 1016 | } 1017 | return message; 1018 | } 1019 | 1020 | // src/openai/chat/completions/NonStreamingChatProxyHandler.ts 1021 | async function nonStreamingChatProxyHandler(req, apiParam, log) { 1022 | const [model, geminiReq] = genModel(req); 1023 | let geminiResp = ""; 1024 | try { 1025 | for await (const it of streamGenerateContent(apiParam, model, geminiReq)) { 1026 | const data = resultHelper(it); 1027 | if (typeof data === "string") { 1028 | geminiResp += data; 1029 | } else { 1030 | geminiResp = data; 1031 | break; 1032 | } 1033 | } 1034 | } catch (err) { 1035 | log?.error(req); 1036 | log?.error(err?.message ?? err.toString()); 1037 | geminiResp = err?.message ?? 
err.toString(); 1038 | } 1039 | log?.debug(req); 1040 | log?.debug(geminiResp); 1041 | function genOpenAiResp(content) { 1042 | if (typeof content === "string") { 1043 | return { 1044 | id: "chatcmpl-abc123", 1045 | object: "chat.completion", 1046 | created: Math.floor(Date.now() / 1e3), 1047 | model: model.model, 1048 | choices: [ 1049 | { 1050 | message: { role: "assistant", content, refusal: null }, 1051 | finish_reason: "stop", 1052 | index: 0, 1053 | logprobs: null 1054 | } 1055 | ] 1056 | }; 1057 | } 1058 | return { 1059 | id: "chatcmpl-abc123", 1060 | object: "chat.completion", 1061 | created: Math.floor(Date.now() / 1e3), 1062 | model: model.model, 1063 | choices: [ 1064 | { 1065 | message: { 1066 | role: "assistant", 1067 | refusal: null, 1068 | content: null, 1069 | function_call: { 1070 | name: content.name ?? "", 1071 | arguments: JSON.stringify(content.args) 1072 | } 1073 | }, 1074 | finish_reason: "function_call", 1075 | index: 0, 1076 | logprobs: null 1077 | } 1078 | ] 1079 | }; 1080 | } 1081 | return Response.json(genOpenAiResp(geminiResp)); 1082 | } 1083 | 1084 | // src/openai/chat/completions/StreamingChatProxyHandler.ts 1085 | function streamingChatProxyHandler(req, apiParam, log) { 1086 | const [model, geminiReq] = genModel(req); 1087 | log?.debug("streamGenerateContent request", req); 1088 | return sseResponse( 1089 | async function* () { 1090 | try { 1091 | for await (const it of streamGenerateContent(apiParam, model, geminiReq)) { 1092 | log?.debug("streamGenerateContent resp", it); 1093 | const data = resultHelper(it); 1094 | yield genStreamResp({ 1095 | model: model.model, 1096 | content: data, 1097 | stop: false 1098 | }); 1099 | } 1100 | } catch (error) { 1101 | yield genStreamResp({ 1102 | model: model.model, 1103 | content: error?.message ?? error.toString(), 1104 | stop: true 1105 | }); 1106 | } 1107 | yield genStreamResp({ model: model.model, content: "", stop: true }); 1108 | yield "[DONE]"; 1109 | return void 0; 1110 | }() 1111 | ); 1112 | } 1113 | function genStreamResp({ 1114 | model, 1115 | content, 1116 | stop 1117 | }) { 1118 | if (typeof content === "string") { 1119 | return { 1120 | id: "chatcmpl-abc123", 1121 | object: "chat.completion.chunk", 1122 | created: Math.floor(Date.now() / 1e3), 1123 | model, 1124 | choices: [ 1125 | { 1126 | delta: { role: "assistant", content }, 1127 | finish_reason: stop ? "stop" : null, 1128 | index: 0 1129 | } 1130 | ] 1131 | }; 1132 | } 1133 | return { 1134 | id: "chatcmpl-abc123", 1135 | object: "chat.completion.chunk", 1136 | created: Math.floor(Date.now() / 1e3), 1137 | model, 1138 | choices: [ 1139 | { 1140 | delta: { role: "assistant", function_call: content }, 1141 | finish_reason: stop ? "function_call" : null, 1142 | index: 0 1143 | } 1144 | ] 1145 | }; 1146 | } 1147 | var encoder = new TextEncoder(); 1148 | function sseResponse(dataStream) { 1149 | const s = new ReadableStream({ 1150 | async pull(controller) { 1151 | const { value, done } = await dataStream.next(); 1152 | if (done) { 1153 | controller.close(); 1154 | } else { 1155 | const data = typeof value === "string" ? 
value : JSON.stringify(value); 1156 | controller.enqueue(encoder.encode(toSseMsg({ data }))); 1157 | } 1158 | } 1159 | }); 1160 | const response = new Response(s, { 1161 | status: 200, 1162 | headers: new Headers({ 1163 | "Content-Type": "text/event-stream" 1164 | }) 1165 | }); 1166 | return response; 1167 | } 1168 | function toSseMsg({ event, data, id }) { 1169 | let result = `data: ${data} 1170 | `; 1171 | if (event) { 1172 | result += `event: ${event ?? ""} 1173 | `; 1174 | } 1175 | if (id) { 1176 | result += `id: ${id ?? ""} 1177 | `; 1178 | } 1179 | return `${result} 1180 | `; 1181 | } 1182 | 1183 | // src/openai/chat/completions/ChatProxyHandler.ts 1184 | async function chatProxyHandler(rawReq) { 1185 | const req = await rawReq.json(); 1186 | const headers = rawReq.headers; 1187 | const apiParam = getToken(headers); 1188 | if (apiParam == null) { 1189 | return new Response("Unauthorized", { status: 401 }); 1190 | } 1191 | if (req.stream !== true) { 1192 | return await nonStreamingChatProxyHandler(req, apiParam, rawReq.logger); 1193 | } 1194 | return streamingChatProxyHandler(req, apiParam, rawReq.logger); 1195 | } 1196 | 1197 | // src/openai/embeddingProxyHandler.ts 1198 | async function embeddingProxyHandler(rawReq) { 1199 | const req = await rawReq.json(); 1200 | const log = rawReq.logger; 1201 | const headers = rawReq.headers; 1202 | const apiParam = getToken(headers); 1203 | if (apiParam == null) { 1204 | return new Response("Unauthorized", { status: 401 }); 1205 | } 1206 | const embedContentRequest = { 1207 | model: "models/text-embedding-004", 1208 | content: { 1209 | parts: [req.input].flat().map((it) => ({ text: it.toString() })) 1210 | } 1211 | }; 1212 | log?.warn("request", embedContentRequest); 1213 | let geminiResp = []; 1214 | try { 1215 | const it = await embedContent(apiParam, new GeminiModel("text-embedding-004"), embedContentRequest); 1216 | const data = it?.embedding?.values; 1217 | geminiResp = data; 1218 | } catch (err) { 1219 | log?.error(req); 1220 | log?.error(err?.message ?? err.toString()); 1221 | geminiResp = err?.message ?? err.toString(); 1222 | } 1223 | log?.debug(req); 1224 | log?.debug(geminiResp); 1225 | const resp = { 1226 | object: "list", 1227 | data: [ 1228 | { 1229 | object: "embedding", 1230 | index: 0, 1231 | embedding: geminiResp ?? 
[] 1232 | } 1233 | ], 1234 | model: req.model, 1235 | usage: { 1236 | prompt_tokens: 5, 1237 | total_tokens: 5 1238 | } 1239 | }; 1240 | return Response.json(resp); 1241 | } 1242 | 1243 | // src/openai/models.ts 1244 | var modelData = Object.keys(ModelMapping).map((model) => ({ 1245 | created: 1677610602, 1246 | object: "model", 1247 | owned_by: "openai", 1248 | id: model 1249 | })); 1250 | var models = () => { 1251 | return { 1252 | object: "list", 1253 | data: modelData 1254 | }; 1255 | }; 1256 | var modelDetail = (model) => { 1257 | return modelData.find((it) => it.id === model); 1258 | }; 1259 | 1260 | // src/app.ts 1261 | var { preflight, corsify } = e({ allowHeaders: "*" }); 1262 | var app = r({ 1263 | before: [ 1264 | preflight, 1265 | (req) => { 1266 | req.logger = new Logger(crypto.randomUUID().toString()); 1267 | req.logger.warn(`--> ${req.method} ${req.url}`); 1268 | } 1269 | ], 1270 | finally: [ 1271 | corsify, 1272 | (_, req) => { 1273 | req.logger?.warn(`<-- ${req.method} ${req.url}`); 1274 | } 1275 | ] 1276 | }); 1277 | app.get("/", hello); 1278 | app.post("/v1/chat/completions", chatProxyHandler); 1279 | app.post("/v1/embeddings", embeddingProxyHandler); 1280 | app.get("/v1/models", () => Response.json(models())); 1281 | app.get("/v1/models/:model", (c) => Response.json(modelDetail(c.params.model))); 1282 | app.post("/:model_version/models/:model_and_action", geminiProxy); 1283 | app.all("*", () => new Response("Page Not Found", { status: 404 })); 1284 | 1285 | // main_node.ts 1286 | console.log("Listening on http://localhost:8000/"); 1287 | serve({ 1288 | fetch: app.fetch, 1289 | port: 8e3 1290 | }); 1291 | -------------------------------------------------------------------------------- /docker/bun.Dockerfile: -------------------------------------------------------------------------------- 1 | #---------------- 2 | FROM denoland/deno:latest AS builder 3 | WORKDIR /data 4 | COPY . . 5 | RUN deno task build:deno 6 | 7 | #---------------- 8 | 9 | FROM oven/bun:latest 10 | WORKDIR /data 11 | COPY --from=builder /data/dist/main_bun.mjs app.mjs 12 | CMD ["bun", "run", "app.mjs"] -------------------------------------------------------------------------------- /docker/deno.Dockerfile: -------------------------------------------------------------------------------- 1 | #---------------- 2 | FROM denoland/deno:latest AS builder 3 | WORKDIR /data 4 | COPY . . 5 | RUN deno task build:deno 6 | 7 | #---------------- 8 | FROM denoland/deno:latest 9 | WORKDIR /data 10 | COPY --from=builder /data/dist/main_deno.mjs app.mjs 11 | ENTRYPOINT [ "" ] 12 | CMD ["deno","run","--allow-net","--allow-env","app.mjs"] 13 | -------------------------------------------------------------------------------- /docker/node.Dockerfile: -------------------------------------------------------------------------------- 1 | #---------------- 2 | FROM denoland/deno:latest AS builder 3 | WORKDIR /data 4 | COPY . . 
5 | RUN deno task build:deno 6 | 7 | #---------------- 8 | FROM node:latest 9 | WORKDIR /data 10 | COPY --from=builder /data/dist/main_node.mjs app.mjs 11 | CMD ["node", "app.mjs"] -------------------------------------------------------------------------------- /fly.toml: -------------------------------------------------------------------------------- 1 | app = 'gemini-openai-proxy' 2 | primary_region = 'lax' 3 | 4 | [build] 5 | dockerfile = 'docker/bun.Dockerfile' 6 | 7 | [http_service] 8 | internal_port = 8000 9 | force_https = true 10 | auto_stop_machines = "suspend" 11 | auto_start_machines = true 12 | min_machines_running = 0 13 | processes = ['app'] 14 | 15 | [http_service.concurrency] 16 | type = 'requests' 17 | hard_limit = 250 18 | soft_limit = 200 19 | 20 | [[vm]] 21 | memory = '256MB' 22 | cpu_kind = 'shared' 23 | cpus = 1 24 | -------------------------------------------------------------------------------- /generate-opeapi-types.ts: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env -S deno run --allow-net --allow-write=./src/generated-types --allow-read=./src/generated-types --allow-read=. --no-prompt --allow-env 2 | import { emptyDirSync } from "jsr:@std/fs" 3 | import { parse as parseYaml } from "jsr:@std/yaml" 4 | import openapiTS, { astToString } from "https://esm.sh/openapi-typescript@7.6.1?bundle" 5 | 6 | // @ts-types="https://unpkg.com/openapi-format@1.25.0/types/openapi-format.d.ts" 7 | import { openapiFilter } from "openapi-format" 8 | 9 | const openapis = [ 10 | { 11 | data: await fetch("https://raw.githubusercontent.com/openai/openai-openapi/refs/heads/master/openapi.yaml") 12 | .then((res) => res.text()) 13 | .then(parseYaml), 14 | path: "./src/generated-types/openai-types.ts", 15 | }, 16 | { 17 | data: await fetch( 18 | "https://github.com/zuisong/googleapis-openapi/raw/refs/heads/main/googleapis/generativelanguage/v1alpha/generativelanguage-api-openapi_v3.json", 19 | ) 20 | .then((res) => res.text()) 21 | .then(JSON.parse), 22 | path: "./src/generated-types/gemini-types.ts", 23 | }, 24 | ] as const 25 | 26 | emptyDirSync("./src/generated-types/") 27 | 28 | for (const { path, data } of openapis) { 29 | const { data: res } = await openapiFilter(data, { 30 | filterSet: { 31 | inverseOperationIds: [ 32 | /// openai 33 | "createChatCompletion", 34 | "createEmbedding", 35 | "listModels", 36 | /// googleapis 37 | "generativelanguage.tunedModels.streamGenerateContent", 38 | "generativelanguage.models.embedContent", 39 | "generativelanguage.tunedModels.create", 40 | ], 41 | unusedComponents: ["schemas"], 42 | preserveEmptyObjects: false, 43 | }, 44 | defaultFilter: {}, 45 | }) 46 | 47 | const ast = await openapiTS(res, { 48 | excludeDeprecated: false, 49 | cwd: "", 50 | alphabetize: true, 51 | additionalProperties: true, 52 | }) 53 | 54 | const code = astToString(ast) 55 | 56 | await Deno.writeTextFile(path, code) 57 | } 58 | -------------------------------------------------------------------------------- /license: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024-present zuisong 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to 
whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /main_bun.ts: -------------------------------------------------------------------------------- 1 | import { app } from "./src/app.ts" 2 | 3 | console.log("Listening on http://localhost:8000/") 4 | 5 | // @ts-ignore suppress warning 6 | Bun.serve({ 7 | port: 8000, 8 | fetch: app.fetch, 9 | }) 10 | -------------------------------------------------------------------------------- /main_cloudflare-workers.ts: -------------------------------------------------------------------------------- 1 | import { app } from "./src/app.ts" 2 | 3 | export default { 4 | fetch: app.fetch, 5 | } 6 | -------------------------------------------------------------------------------- /main_deno.ts: -------------------------------------------------------------------------------- 1 | import { app } from "./src/app.ts" 2 | 3 | Deno.serve({ port: 8000 }, app.fetch) 4 | -------------------------------------------------------------------------------- /main_node.ts: -------------------------------------------------------------------------------- 1 | import { serve } from "@hono/node-server" 2 | import { app } from "./src/app.ts" 3 | console.log("Listening on http://localhost:8000/") 4 | serve({ 5 | fetch: app.fetch, 6 | port: 8000, 7 | }) 8 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "gemini-openai-proxy", 3 | "private": true, 4 | "version": "0.15.0", 5 | "author": "zuisong", 6 | "homepage": "https://github.com/zuisong/gemini-openai-proxy", 7 | "type": "module", 8 | "license": "MIT", 9 | "scripts": { 10 | "start:bun": "bun main_bun.ts", 11 | "start:deno": "deno run --allow-net --allow-env main_deno.ts", 12 | "start:node": "npm run build:node && node dist/main_node.mjs", 13 | "build:bun": "bun run build.mjs", 14 | "build:deno": "deno run --allow-read --allow-env --allow-write=./dist --allow-run build.mjs", 15 | "build:rollup": "deno run --allow-ffi --allow-write=./dist --allow-read --allow-env build.mjs", 16 | "build:node": "node build.mjs", 17 | "test:deno": "deno test --allow-net --allow-env", 18 | "test-cov": "deno test --coverage -A --env && deno coverage coverage --lcov --output=coverage/lcov.info" 19 | }, 20 | "dependencies": { 21 | "@hono/node-server": "1.14.0", 22 | "eventsource-parser": "3.0.1", 23 | "itty-router": "5.0.18" 24 | }, 25 | "devDependencies": { 26 | "@total-typescript/ts-reset": "0.6.1", 27 | "esbuild": "0.25.2" 28 | }, 29 | "engines": { 30 | "node": ">=18.0.0", 31 | "bun": ">=1.23.0", 32 | "deno": ">=1.40.0" 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /readme.md:
-------------------------------------------------------------------------------- 1 | # Gemini-OpenAI-Proxy 2 | 3 | Gemini-OpenAI-Proxy is a proxy that converts OpenAI API 4 | protocol calls into the Google Gemini protocol, so that software written 5 | against the OpenAI protocol can use Gemini models transparently. 6 | 7 | If you're interested in using Google Gemini but don't want to modify your 8 | software, Gemini-OpenAI-Proxy is a great option. It lets you easily 9 | integrate the capabilities of Google Gemini without doing any 10 | complex development work. 11 | 12 | ## Demo 13 | 14 | > Get an API key from 15 | 16 |
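The demos below use `curl`, but any OpenAI-protocol client works once it is pointed at the proxy. For reference, a minimal TypeScript sketch of the same call, assuming the proxy is listening on `http://localhost:8000` and with `YOUR_GEMINI_API_KEY` as a placeholder for a real Gemini API key:

```ts
// Call the proxy's OpenAI-compatible chat endpoint with plain fetch.
// Assumptions: proxy on localhost:8000; the key below is a placeholder.
const apiKey = "YOUR_GEMINI_API_KEY"

const resp = await fetch("http://localhost:8000/v1/chat/completions", {
  method: "POST",
  headers: {
    Authorization: `Bearer ${apiKey}`,
    "Content-Type": "application/json",
  },
  body: JSON.stringify({
    model: "gpt-3.5-turbo", // mapped to a Gemini model by the proxy
    messages: [{ role: "user", content: "Hello, Who are you?" }],
    temperature: 0.7,
  }),
})

const completion = await resp.json()
console.log(completion.choices[0].message.content)
```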
17 | 18 | ✅ Gemini Pro 19 | 20 | ```shell 21 | curl -s http://localhost:8000/v1/chat/completions \ 22 | -H "Authorization: Bearer $YOUR_GEMINI_API_KEY" \ 23 | -H "Content-Type: application/json" \ 24 | -d '{ 25 | "model": "gpt-3.5-turbo", 26 | "messages": [{"role": "user", "content": "Hello, Who are you?"}], 27 | "temperature": 0.7 28 | }' 29 | ``` 30 | 31 | ![demo](./assets/demo.png) 32 | 33 |
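The `Authorization` header is parsed by `getToken` in `src/utils.ts`: the text after `Bearer ` is taken as the Gemini API key, and an optional `#` suffix is read as URL search parameters (currently only `useBeta` is recognized). A small sketch of both accepted forms, with a placeholder key:

```ts
import { getToken } from "./src/utils.ts"

// Plain key: everything after "Bearer " is used as the Gemini API key.
getToken(new Headers({ Authorization: "Bearer YOUR_GEMINI_API_KEY" }))
// -> { apikey: "YOUR_GEMINI_API_KEY", useBeta: false }

// Key with flags: the part after "#" is parsed as URL search parameters.
getToken(new Headers({ Authorization: "Bearer YOUR_GEMINI_API_KEY#useBeta" }))
// -> { apikey: "YOUR_GEMINI_API_KEY", useBeta: true }
```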
34 | 35 |
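Streaming works too: with `"stream": true` the proxy answers with Server-Sent Events (one `data: {...}` chunk per delta, terminated by `data: [DONE]`, as produced by `sseResponse` in `src/openai/chat/completions/StreamingChatProxyHandler.ts`). A rough TypeScript consumer; note the naive line handling, which assumes each SSE line arrives whole in a single chunk, something a robust client should buffer for instead:

```ts
const resp = await fetch("http://localhost:8000/v1/chat/completions", {
  method: "POST",
  headers: {
    Authorization: "Bearer YOUR_GEMINI_API_KEY", // placeholder key
    "Content-Type": "application/json",
  },
  body: JSON.stringify({
    model: "gpt-3.5-turbo",
    messages: [{ role: "user", content: "Hello" }],
    stream: true,
  }),
})

for await (const chunk of resp.body!.pipeThrough(new TextDecoderStream())) {
  for (const line of chunk.split("\n")) {
    if (!line.startsWith("data: ")) continue
    const payload = line.slice("data: ".length)
    if (payload === "[DONE]") continue
    const delta = JSON.parse(payload).choices[0].delta
    if (delta.content) console.log(delta.content)
  }
}
```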
36 | 37 | ✅ Gemini Pro Vision 38 | 39 | ```shell 40 | curl -s http://localhost:8000/v1/chat/completions \ 41 | -H "Authorization: Bearer $YOUR_GEMINI_API_KEY" \ 42 | -H "Content-Type: application/json" \ 43 | -d '{ 44 | "model": "gpt-4-vision-preview", 45 | "messages": [ 46 | { 47 | "role": "user", 48 | "content": [ 49 | { 50 | "type": "text", 51 | "text": "What do you see in this picture?" 52 | }, 53 | { 54 | "type": "image_url", 55 | "image_url": { 56 | "url": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADAAAAAnAgMAAAA0vyM3AAAACVBMVEX/4WwCAgF3aTMcpbzGAAAAa0lEQVR4nGOgAWB1QOYEIHFEcXKmhCBxQqYgcSLEEGymAFEEhzFAFYmTwNoA53A6IDmB1YETidPAiLBVFGgEgrNqJYIzNTQU4Z5QZA6QNQ3hGpAZcNegceBOADFQOQlQDhfQyUwLkPxKVwAABbkRCcDA66QAAAAASUVORK5CYII=" 57 | } 58 | } 59 | ] 60 | } 61 | ], 62 | "stream": false 63 | }' 64 | ``` 65 | 66 | ![vision demo](./assets/vision-demo.jpeg) 67 | 68 |
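Model names are resolved by `GeminiModel.modelMapping` in `src/utils.ts`, following the table in the Model Mappings section below: known OpenAI names go through the mapping table, names starting with `gemini` pass through unchanged, and anything else falls back to the default model. A quick sketch, assuming it is run from the repository root:

```ts
import { GeminiModel } from "./src/utils.ts"

GeminiModel.modelMapping("gpt-4o").toString()               // "gemini-1.5-flash-latest"
GeminiModel.modelMapping("gemini-2.0-flash-exp").toString() // passes through unchanged
GeminiModel.modelMapping("some-unknown-model").toString()   // "gemini-1.5-flash-latest" (default)
```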
69 | 70 | ## Plan 71 | 72 | - [x] `/v1/chat/completions` 73 | - [x] stream 74 | - [x] complete 75 | 76 | ## Model Mappings 77 | 78 | | Request Model | Target Gemini Model | 79 | | -------------------- | -------------------------- | 80 | | gpt-3.5-turbo | gemini-1.5-flash-8b-latest | 81 | | gpt-4 | gemini-1.5-pro-latest | 82 | | gpt-4o | gemini-1.5-flash-latest | 83 | | gpt-4o-mini | gemini-1.5-flash-8b-latest | 84 | | gpt-4-vision-preview | gemini-1.5-flash-latest | 85 | | gpt-4-turbo | gemini-1.5-pro-latest | 86 | | gpt-4-turbo-preview | gemini-2.0-flash-exp | 87 | | gemini* | gemini* | 88 | | ...(others) | gemini-1.5-flash-latest | 89 | 90 | ## Run On Serverless 91 | 92 | ### [Cloudflare Workers](https://workers.cloudflare.com) 93 | 94 | > build command `npm run build:cf_worker` 95 | 96 | Copy [`main_cloudflare-workers.mjs`](./dist/main_cloudflare-workers.mjs) to 97 | `cloudflare-workers` 98 | 99 | ### [Deno Deploy](https://deno.com/deploy) 100 | 101 | > build command `npm run build:deno` 102 | 103 | Copy [`main_deno.mjs`](./dist/main_deno.mjs) to `deno deploy` 104 | 105 | ### [Vercel](https://vercel.com) 106 | 107 | > build command `npm run build:cf_worker` 108 | 109 | [![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https://github.com/zuisong/gemini-openai-proxy&repository-name=gemini-openai-proxy) 110 | 111 | - Alternatively can be deployed with [cli](https://vercel.com/docs/cli): 112 | `vercel deploy` 113 | - Serve locally: `vercel dev` 114 | - Vercel _Functions_ 115 | [limitations](https://vercel.com/docs/functions/limitations) (with _Edge_ 116 | runtime) 117 | 118 | ## Run On Local 119 | 120 | ### deno 121 | 122 | ```shell 123 | deno task start:deno 124 | ``` 125 | 126 | ### node 127 | 128 | ```shell 129 | npm install && npm run start:node 130 | ``` 131 | 132 | ### bun 133 | 134 | ```shell 135 | bun run start:bun 136 | ``` 137 | 138 | ### docker 139 | 140 | ```shell 141 | docker run -d -p 8000:8000 ghcr.io/zuisong/gemini-openai-proxy:deno 142 | ## or 143 | docker run -d -p 8000:8000 ghcr.io/zuisong/gemini-openai-proxy:bun 144 | ## or 145 | docker run -d -p 8000:8000 ghcr.io/zuisong/gemini-openai-proxy:node 146 | ``` 147 | 148 | ## Star History 149 | 150 | 151 | 152 | 153 | 154 | Star History Chart 155 | 156 | 157 | -------------------------------------------------------------------------------- /src/app.ts: -------------------------------------------------------------------------------- 1 | import type { IRequest } from "itty-router" 2 | import { Router } from "itty-router/Router" 3 | import { cors } from "itty-router/cors" 4 | import { geminiProxy } from "./gemini-proxy.ts" 5 | import { hello } from "./hello.ts" 6 | import { type Any, Logger } from "./log.ts" 7 | import { chatProxyHandler } from "./openai/chat/completions/ChatProxyHandler.ts" 8 | import { embeddingProxyHandler } from "./openai/embeddingProxyHandler.ts" 9 | import { modelDetail, models } from "./openai/models.ts" 10 | 11 | const { preflight, corsify } = cors({ allowHeaders: "*" }) 12 | 13 | const app = Router({ 14 | before: [ 15 | preflight, 16 | (req) => { 17 | req.logger = new Logger(crypto.randomUUID().toString()) 18 | req.logger.warn(`--> ${req.method} ${req.url}`) 19 | }, 20 | ], 21 | finally: [ 22 | corsify, 23 | (_, req) => { 24 | req.logger?.warn(`<-- ${req.method} ${req.url}`) 25 | // return resp 26 | }, 27 | ], 28 | }) 29 | 30 | app.get("/", hello) 31 | app.post("/v1/chat/completions", chatProxyHandler) 32 | app.post("/v1/embeddings", embeddingProxyHandler) 33 
| app.get("/v1/models", () => Response.json(models())) 34 | app.get("/v1/models/:model", (c) => Response.json(modelDetail(c.params.model))) 35 | app.post("/:model_version/models/:model_and_action", geminiProxy) 36 | app.all("*", () => new Response("Page Not Found", { status: 404 })) 37 | 38 | export { app } 39 | -------------------------------------------------------------------------------- /src/gemini-api-client/errors.ts: -------------------------------------------------------------------------------- 1 | export class GoogleGenerativeAIError extends Error { 2 | constructor(message: string) { 3 | super(`[GoogleGenerativeAI Error]: ${message}`) 4 | } 5 | } 6 | 7 | export class GoogleGenerativeAIResponseError<T> extends GoogleGenerativeAIError { 8 | public response?: T 9 | constructor(message: string, response?: T) { 10 | super(message) 11 | this.response = response 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /src/gemini-api-client/gemini-api-client.ts: -------------------------------------------------------------------------------- 1 | import { EventSourceParserStream } from "eventsource-parser/stream" 2 | import type { components } from "../generated-types/gemini-types.ts" 3 | import type { ApiParam, GeminiModel } from "../utils.ts" 4 | import { GoogleGenerativeAIError } from "./errors.ts" 5 | import type { 6 | EmbedContentRequest, 7 | EmbedContentResponse, 8 | GenerateContentRequest, 9 | GenerateContentResponse, 10 | RequestOptions, 11 | } from "./types.ts" 12 | 13 | interface Task { 14 | streamGenerateContent: { 15 | request: GenerateContentRequest 16 | response: GenerateContentResponse 17 | } 18 | embedContent: { 19 | request: EmbedContentRequest 20 | response: EmbedContentResponse 21 | } 22 | } 23 | 24 | export async function* streamGenerateContent( 25 | apiParam: ApiParam, 26 | model: GeminiModel, 27 | params: Task["streamGenerateContent"]["request"], 28 | requestOptions?: RequestOptions, 29 | ) { 30 | const response = await makeRequest( 31 | toURL({ model, task: "streamGenerateContent", stream: true, apiParam }), 32 | JSON.stringify(params), 33 | requestOptions, 34 | ) 35 | const body = response.body 36 | if (body == null) { 37 | return 38 | } 39 | 40 | for await (const event of body.pipeThrough(new TextDecoderStream()).pipeThrough(new EventSourceParserStream())) { 41 | const responseJson = JSON.parse(event.data) as Task["streamGenerateContent"]["response"] 42 | yield responseJson 43 | } 44 | } 45 | 46 | export async function embedContent( 47 | apiParam: ApiParam, 48 | model: GeminiModel, 49 | params: Task["embedContent"]["request"], 50 | requestOptions?: RequestOptions, 51 | ) { 52 | const response = await makeRequest( 53 | toURL({ model, task: "embedContent", stream: false, apiParam }), 54 | JSON.stringify(params), 55 | requestOptions, 56 | ) 57 | const body = response.body 58 | if (body == null) { 59 | return 60 | } 61 | 62 | const responseJson = (await response.json()) as Task["embedContent"]["response"] 63 | return responseJson 64 | } 65 | 66 | async function makeRequest(url: URL, body: string, requestOptions?: RequestOptions): Promise<Response> { 67 | let response: Response 68 | try { 69 | response = await fetch(url, { 70 | ...buildFetchOptions(requestOptions), 71 | method: "POST", 72 | headers: { 73 | "Content-Type": "application/json", 74 | }, 75 | body, 76 | }) 77 | if (!response.ok) { 78 | let message: string | undefined = "" 79 | try { 80 | const errResp = (await response.json()) as components["schemas"]["Operation"] 81 |
message = errResp.error?.message 82 | if (errResp?.error?.details) { 83 | message += ` ${JSON.stringify(errResp.error.details)}` 84 | } 85 | } catch (_e) { 86 | // ignored 87 | } 88 | throw new Error(`[${response.status} ${response.statusText}] ${message}`) 89 | } 90 | } catch (e) { 91 | console.log(e) 92 | const err = new GoogleGenerativeAIError(`Error fetching from google -> ${e.message}`) 93 | err.stack = e.stack 94 | throw err 95 | } 96 | return response 97 | } 98 | 99 | function toURL({ 100 | model, 101 | task, 102 | stream, 103 | apiParam, 104 | }: { 105 | model: GeminiModel 106 | task: keyof Task 107 | stream: boolean 108 | apiParam: ApiParam 109 | }) { 110 | const BASE_URL = "https://generativelanguage.googleapis.com" 111 | const api_version = model.apiVersion() 112 | const url = new URL(`${BASE_URL}/${api_version}/models/${model}:${task}`) 113 | url.searchParams.append("key", apiParam.apikey) 114 | if (stream) { 115 | url.searchParams.append("alt", "sse") 116 | } 117 | return url 118 | } 119 | 120 | /** 121 | * Generates the request options to be passed to the fetch API. 122 | * @param requestOptions - The user-defined request options. 123 | * @returns The generated request options. 124 | */ 125 | function buildFetchOptions(requestOptions?: RequestOptions): RequestInit { 126 | const fetchOptions = {} as RequestInit 127 | if (requestOptions?.timeout) { 128 | const abortController = new AbortController() 129 | const signal = abortController.signal 130 | setTimeout(() => abortController.abort(), requestOptions.timeout) 131 | fetchOptions.signal = signal 132 | } 133 | return fetchOptions 134 | } 135 | -------------------------------------------------------------------------------- /src/gemini-api-client/response-helper.ts: -------------------------------------------------------------------------------- 1 | import { GoogleGenerativeAIResponseError } from "./errors.ts" 2 | import type { FunctionCall } from "./types.ts" 3 | import type { Candidate, FinishReason, GenerateContentResponse } from "./types.ts" 4 | 5 | /** 6 | * Adds convenience helper methods to a response object, including stream 7 | * chunks (as long as each chunk is a complete GenerateContentResponse JSON). 8 | */ 9 | export function resultHelper(response: GenerateContentResponse): string | FunctionCall { 10 | if (response.candidates && response.candidates.length > 0) { 11 | if (response.candidates.length > 1) { 12 | console.warn( 13 | `This response had ${response.candidates.length} candidates. Returning text from the first candidate only. Access response.candidates directly to use the other candidates.`, 14 | ) 15 | } 16 | if (hadBadFinishReason(response.candidates[0])) { 17 | throw new GoogleGenerativeAIResponseError( 18 | `${formatBlockErrorMessage(response)}`, 19 | response, 20 | ) 21 | } 22 | return getText(response) 23 | } 24 | if (response.promptFeedback) { 25 | throw new GoogleGenerativeAIResponseError( 26 | `Text not available. ${formatBlockErrorMessage(response)}`, 27 | response, 28 | ) 29 | } 30 | return "" 31 | } 32 | 33 | /** 34 | * Returns text of first candidate. 
35 | */ 36 | export function getText(response: GenerateContentResponse): string | FunctionCall { 37 | if (response.candidates?.[0].content?.parts?.[0]?.text) { 38 | return response.candidates[0].content.parts[0].text 39 | } 40 | if (response.candidates?.[0].content?.parts?.[0]?.functionCall) { 41 | return response.candidates[0].content.parts[0].functionCall 42 | } 43 | return "" 44 | } 45 | 46 | const badFinishReasons: FinishReason[] = ["RECITATION", "SAFETY"] 47 | 48 | function hadBadFinishReason(candidate: Candidate): boolean { 49 | return !!candidate.finishReason && badFinishReasons.includes(candidate.finishReason) 50 | } 51 | 52 | function formatBlockErrorMessage(response: GenerateContentResponse): string { 53 | let message = "" 54 | if ((!response.candidates || response.candidates.length === 0) && response.promptFeedback) { 55 | message += "Response was blocked" 56 | if (response.promptFeedback?.blockReason) { 57 | message += ` due to ${response.promptFeedback.blockReason}` 58 | } 59 | if (response.promptFeedback?.blockReasonMessage) { 60 | message += `: ${response.promptFeedback.blockReasonMessage}` 61 | } 62 | } else if (response.candidates?.[0]) { 63 | const firstCandidate = response.candidates[0] 64 | if (hadBadFinishReason(firstCandidate)) { 65 | message += `Candidate was blocked due to ${firstCandidate.finishReason}` 66 | if (firstCandidate.finishMessage) { 67 | message += `: ${firstCandidate.finishMessage}` 68 | } 69 | } 70 | } 71 | return message 72 | } 73 | -------------------------------------------------------------------------------- /src/gemini-api-client/types.ts: -------------------------------------------------------------------------------- 1 | import type { components } from "../generated-types/gemini-types.ts" 2 | 3 | export type GenerateContentRequest = components["schemas"]["GenerateContentRequest"] 4 | 5 | export type Content = components["schemas"]["Content"] 6 | 7 | export type GenerateContentResponse = components["schemas"]["GenerateContentResponse"] 8 | 9 | export type Candidate = components["schemas"]["Candidate"] 10 | 11 | export type FunctionCall = components["schemas"]["FunctionCall"] 12 | 13 | export type FinishReason = Candidate["finishReason"] 14 | 15 | export type Part = components["schemas"]["Part"] 16 | 17 | export type JsonSchema = components["schemas"]["Schema"] 18 | 19 | export type EmbedContentRequest = components["schemas"]["EmbedContentRequest"] 20 | 21 | export type EmbedContentResponse = components["schemas"]["EmbedContentResponse"] 22 | 23 | export interface RequestOptions { 24 | timeout: number 25 | } 26 | -------------------------------------------------------------------------------- /src/gemini-proxy.ts: -------------------------------------------------------------------------------- 1 | export async function geminiProxy(rawReq: Request) { 2 | const url = new URL(rawReq.url) 3 | url.host = "generativelanguage.googleapis.com" 4 | url.port = "" 5 | url.protocol = "https:" 6 | const req = new Request(url, rawReq) 7 | const resp = await fetch(req) 8 | return new Response(resp.body, resp) 9 | } 10 | -------------------------------------------------------------------------------- /src/hello.ts: -------------------------------------------------------------------------------- 1 | import { getRuntimeKey } from "./utils.ts" 2 | 3 | export function hello(req: Request): Response { 4 | const origin = new URL(req.url).origin 5 | return new Response(` 6 | Hello Gemini-OpenAI-Proxy from ${getRuntimeKey()}! 
7 | 8 | You can try it with: 9 | 10 | curl ${origin}/v1/chat/completions \\ 11 | -H "Authorization: Bearer $YOUR_GEMINI_API_KEY" \\ 12 | -H "Content-Type: application/json" \\ 13 | -d '{ 14 | "model": "gpt-3.5-turbo", 15 | "messages": [{"role": "user", "content": "Hello"}], 16 | "temperature": 0.7 17 | }' 18 | `) 19 | } 20 | -------------------------------------------------------------------------------- /src/log.ts: -------------------------------------------------------------------------------- 1 | export type Any = Parameters<typeof console.log>[0] 2 | 3 | export interface ILogger { 4 | error: (...data: Any[]) => void 5 | warn: (...data: Any[]) => void 6 | info: (...data: Any[]) => void 7 | debug: (...data: Any[]) => void 8 | } 9 | 10 | const LEVEL = ["debug", "info", "warn", "error"] as const 11 | 12 | interface Config { 13 | level: (typeof LEVEL)[number] 14 | prefix: string 15 | } 16 | 17 | export class Logger implements ILogger { 18 | private config: Config 19 | 20 | debug!: Log 21 | info!: Log 22 | warn!: Log 23 | error!: Log 24 | 25 | constructor(prefix?: string, logLevel?: string) { 26 | const level = LEVEL.find((it) => it === logLevel) ?? "warn" 27 | this.config = { 28 | prefix: prefix ?? "", 29 | level, 30 | } 31 | 32 | for (const m of LEVEL) { 33 | this[m] = (...data: Any[]) => this.#write(m, ...data) 34 | } 35 | } 36 | 37 | #write(level: Config["level"], ...data: Any[]) { 38 | const { level: configLevel, prefix } = this.config 39 | if (LEVEL.indexOf(level) < LEVEL.indexOf(configLevel)) { 40 | return 41 | } 42 | 43 | console[level](`${new Date().toISOString()} ${level.toUpperCase()}${prefix ? ` ${prefix}` : ""}`, ...data) 44 | } 45 | } 46 | 47 | type Log = typeof console.log 48 | -------------------------------------------------------------------------------- /src/openai/chat/completions/ChatProxyHandler.ts: -------------------------------------------------------------------------------- 1 | import type { OpenAI } from "../../../types.ts" 2 | import { getToken } from "../../../utils.ts" 3 | import { nonStreamingChatProxyHandler } from "./NonStreamingChatProxyHandler.ts" 4 | import { streamingChatProxyHandler } from "./StreamingChatProxyHandler.ts" 5 | 6 | export async function chatProxyHandler(rawReq: Request): Promise<Response> { 7 | const req = (await rawReq.json()) as OpenAI.Chat.ChatCompletionCreateParams 8 | const headers = rawReq.headers 9 | const apiParam = getToken(headers) 10 | if (apiParam == null) { 11 | return new Response("Unauthorized", { status: 401 }) 12 | } 13 | 14 | if (req.stream !== true) { 15 | return await nonStreamingChatProxyHandler(req, apiParam, rawReq.logger) 16 | } 17 | return streamingChatProxyHandler(req, apiParam, rawReq.logger) 18 | } 19 | -------------------------------------------------------------------------------- /src/openai/chat/completions/NonStreamingChatProxyHandler.ts: -------------------------------------------------------------------------------- 1 | import { streamGenerateContent } from "../../../gemini-api-client/gemini-api-client.ts" 2 | import { resultHelper } from "../../../gemini-api-client/response-helper.ts" 3 | import type { FunctionCall } from "../../../gemini-api-client/types.ts" 4 | import type { Logger } from "../../../log.ts" 5 | import type { OpenAI } from "../../../types.ts" 6 | import { type ApiParam, genModel } from "../../../utils.ts" 7 | 8 | export async function nonStreamingChatProxyHandler( 9 | req: OpenAI.Chat.ChatCompletionCreateParams, 10 | apiParam: ApiParam, 11 | log?: Logger, 12 | ): Promise<Response> { 13 | const [model, geminiReq] =
genModel(req) 14 | let geminiResp: string | FunctionCall = "" 15 | 16 | try { 17 | for await (const it of streamGenerateContent(apiParam, model, geminiReq)) { 18 | const data = resultHelper(it) 19 | if (typeof data === "string") { 20 | geminiResp += data 21 | } else { 22 | geminiResp = data 23 | break 24 | } 25 | } 26 | } catch (err) { 27 | // When an exception occurs, log the request and the response for debugging 28 | log?.error(req) 29 | log?.error(err?.message ?? err.toString()) 30 | geminiResp = err?.message ?? err.toString() 31 | } 32 | 33 | log?.debug(req) 34 | log?.debug(geminiResp) 35 | 36 | function genOpenAiResp(content: string | FunctionCall): OpenAI.Chat.ChatCompletion { 37 | if (typeof content === "string") { 38 | return { 39 | id: "chatcmpl-abc123", 40 | object: "chat.completion", 41 | created: Math.floor(Date.now() / 1000), 42 | model: model.model, 43 | choices: [ 44 | { 45 | message: { role: "assistant", content: content, refusal: null }, 46 | finish_reason: "stop", 47 | index: 0, 48 | logprobs: null, 49 | }, 50 | ], 51 | } 52 | } 53 | 54 | return { 55 | id: "chatcmpl-abc123", 56 | object: "chat.completion", 57 | created: Math.floor(Date.now() / 1000), 58 | model: model.model, 59 | choices: [ 60 | { 61 | message: { 62 | role: "assistant", 63 | refusal: null, 64 | content: null, 65 | function_call: { 66 | name: content.name ?? "", 67 | arguments: JSON.stringify(content.args), 68 | }, 69 | }, 70 | finish_reason: "function_call", 71 | index: 0, 72 | logprobs: null, 73 | }, 74 | ], 75 | } 76 | } 77 | 78 | return Response.json(genOpenAiResp(geminiResp)) 79 | } 80 | -------------------------------------------------------------------------------- /src/openai/chat/completions/StreamingChatProxyHandler.ts: -------------------------------------------------------------------------------- 1 | import { streamGenerateContent } from "../../../gemini-api-client/gemini-api-client.ts" 2 | import { resultHelper } from "../../../gemini-api-client/response-helper.ts" 3 | import type { FunctionCall } from "../../../gemini-api-client/types.ts" 4 | import type { Logger } from "../../../log.ts" 5 | import type { OpenAI } from "../../../types.ts" 6 | import { type ApiParam, genModel } from "../../../utils.ts" 7 | 8 | export function streamingChatProxyHandler( 9 | req: OpenAI.Chat.ChatCompletionCreateParams, 10 | apiParam: ApiParam, 11 | log?: Logger, 12 | ): Response { 13 | const [model, geminiReq] = genModel(req) 14 | log?.debug("streamGenerateContent request", req) 15 | return sseResponse( 16 | (async function* () { 17 | try { 18 | for await (const it of streamGenerateContent(apiParam, model, geminiReq)) { 19 | log?.debug("streamGenerateContent resp", it) 20 | const data = resultHelper(it) 21 | yield genStreamResp({ 22 | model: model.model, 23 | content: data, 24 | stop: false, 25 | }) 26 | } 27 | } catch (error) { 28 | yield genStreamResp({ 29 | model: model.model, 30 | content: error?.message ??
/src/openai/chat/completions/StreamingChatProxyHandler.ts:
--------------------------------------------------------------------------------
1 | import { streamGenerateContent } from "../../../gemini-api-client/gemini-api-client.ts"
2 | import { resultHelper } from "../../../gemini-api-client/response-helper.ts"
3 | import type { FunctionCall } from "../../../gemini-api-client/types.ts"
4 | import type { Logger } from "../../../log.ts"
5 | import type { OpenAI } from "../../../types.ts"
6 | import { type ApiParam, genModel } from "../../../utils.ts"
7 | 
8 | export function streamingChatProxyHandler(
9 | req: OpenAI.Chat.ChatCompletionCreateParams,
10 | apiParam: ApiParam,
11 | log?: Logger,
12 | ): Response {
13 | const [model, geminiReq] = genModel(req)
14 | log?.debug("streamGenerateContent request", req)
15 | return sseResponse(
16 | (async function* () {
17 | try {
18 | for await (const it of streamGenerateContent(apiParam, model, geminiReq)) {
19 | log?.debug("streamGenerateContent resp", it)
20 | const data = resultHelper(it)
21 | yield genStreamResp({
22 | model: model.model,
23 | content: data,
24 | stop: false,
25 | })
26 | }
27 | } catch (error) {
28 | yield genStreamResp({
29 | model: model.model,
30 | content: error?.message ?? error.toString(),
31 | stop: true,
32 | })
33 | }
34 | yield genStreamResp({ model: model.model, content: "", stop: true })
35 | yield "[DONE]"
36 | return undefined
37 | })(),
38 | )
39 | }
40 | 
41 | function genStreamResp({
42 | model,
43 | content,
44 | stop,
45 | }: {
46 | model: string
47 | content: string | FunctionCall
48 | stop: boolean
49 | }): OpenAI.Chat.ChatCompletionChunk {
50 | if (typeof content === "string") {
51 | return {
52 | id: "chatcmpl-abc123",
53 | object: "chat.completion.chunk",
54 | created: Math.floor(Date.now() / 1000),
55 | model: model,
56 | choices: [
57 | {
58 | delta: { role: "assistant", content },
59 | finish_reason: stop ? "stop" : null,
60 | index: 0,
61 | },
62 | ],
63 | } satisfies OpenAI.Chat.ChatCompletionChunk
64 | }
65 | 
66 | return {
67 | id: "chatcmpl-abc123",
68 | object: "chat.completion.chunk",
69 | created: Math.floor(Date.now() / 1000),
70 | model: model,
71 | choices: [
72 | {
73 | delta: { role: "assistant", function_call: content },
74 | finish_reason: stop ? "function_call" : null,
75 | index: 0,
76 | },
77 | ],
78 | } satisfies OpenAI.Chat.ChatCompletionChunk
79 | }
80 | 
81 | const encoder = new TextEncoder()
82 | 
83 | function sseResponse(dataStream: AsyncGenerator<OpenAI.Chat.ChatCompletionChunk | string>): Response {
84 | const s = new ReadableStream({
85 | async pull(controller) {
86 | const { value, done } = await dataStream.next()
87 | if (done) {
88 | controller.close()
89 | } else {
90 | const data = typeof value === "string" ? value : JSON.stringify(value)
91 | controller.enqueue(encoder.encode(toSseMsg({ data })))
92 | }
93 | },
94 | })
95 | 
96 | const response = new Response(s, {
97 | status: 200,
98 | headers: new Headers({
99 | "Content-Type": "text/event-stream",
100 | }),
101 | })
102 | 
103 | return response
104 | }
105 | 
106 | export function toSseMsg({ event, data, id }: SseEvent) {
107 | let result = `data: ${data}\n`
108 | if (event) {
109 | result += `event: ${event}\n`
110 | }
111 | if (id) {
112 | result += `id: ${id}\n`
113 | }
114 | return `${result}\n`
115 | }
116 | 
117 | export interface SseEvent {
118 | event?: string
119 | id?: string
120 | data: string
121 | }
122 | 
--------------------------------------------------------------------------------
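On the wire, each yielded chunk is serialized by toSseMsg into a `data:` line followed by a blank line, and the stream ends with the literal [DONE] sentinel. Illustrative output (payloads abbreviated):

data: {"id":"chatcmpl-abc123","object":"chat.completion.chunk","created":1700000000,"model":"gemini-1.5-flash-latest","choices":[{"delta":{"role":"assistant","content":"Hello"},"finish_reason":null,"index":0}]}

data: {"id":"chatcmpl-abc123","object":"chat.completion.chunk","created":1700000000,"model":"gemini-1.5-flash-latest","choices":[{"delta":{"role":"assistant","content":""},"finish_reason":"stop","index":0}]}

data: [DONE]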
""}\n` 113 | } 114 | return `${result}\n` 115 | } 116 | 117 | export interface SseEvent { 118 | event?: string 119 | id?: string 120 | data: string 121 | } 122 | -------------------------------------------------------------------------------- /src/openai/embeddingProxyHandler.ts: -------------------------------------------------------------------------------- 1 | import { embedContent } from "../gemini-api-client/gemini-api-client.ts" 2 | import type { EmbedContentRequest } from "../gemini-api-client/types.ts" 3 | import type { OpenAI } from "../types.ts" 4 | import { GeminiModel, getToken } from "../utils.ts" 5 | 6 | export async function embeddingProxyHandler(rawReq: Request): Promise { 7 | const req = (await rawReq.json()) as OpenAI.Embeddings.EmbeddingCreateParams 8 | const log = rawReq.logger 9 | const headers = rawReq.headers 10 | const apiParam = getToken(headers) 11 | if (apiParam == null) { 12 | return new Response("Unauthorized", { status: 401 }) 13 | } 14 | 15 | const embedContentRequest: EmbedContentRequest = { 16 | model: "models/text-embedding-004", 17 | content: { 18 | parts: [req.input].flat().map((it) => ({ text: it.toString() })), 19 | }, 20 | } 21 | 22 | log?.warn("request", embedContentRequest) 23 | 24 | let geminiResp: number[] | undefined = [] 25 | 26 | try { 27 | const it = await embedContent(apiParam, new GeminiModel("text-embedding-004"), embedContentRequest) 28 | const data = it?.embedding?.values 29 | geminiResp = data 30 | } catch (err) { 31 | // 出现异常时打印请求参数和响应,以便调试 32 | log?.error(req) 33 | log?.error(err?.message ?? err.toString()) 34 | geminiResp = err?.message ?? err.toString() 35 | } 36 | 37 | log?.debug(req) 38 | log?.debug(geminiResp) 39 | 40 | const resp: OpenAI.Embeddings.CreateEmbeddingResponse = { 41 | object: "list", 42 | data: [ 43 | { 44 | object: "embedding", 45 | index: 0, 46 | embedding: geminiResp ?? [], 47 | }, 48 | ], 49 | model: req.model, 50 | usage: { 51 | prompt_tokens: 5, 52 | total_tokens: 5, 53 | }, 54 | } 55 | 56 | return Response.json(resp) 57 | } 58 | -------------------------------------------------------------------------------- /src/openai/models.ts: -------------------------------------------------------------------------------- 1 | import type { OpenAI } from "../types.ts" 2 | import { ModelMapping } from "../utils.ts" 3 | export const modelData: OpenAI.Models.Model[] = Object.keys(ModelMapping).map((model) => ({ 4 | created: 1677610602, 5 | object: "model", 6 | owned_by: "openai", 7 | id: model, 8 | })) 9 | 10 | export const models = () => { 11 | return { 12 | object: "list", 13 | data: modelData, 14 | } 15 | } 16 | 17 | export const modelDetail = (model: string) => { 18 | return modelData.find((it) => it.id === model) 19 | } 20 | -------------------------------------------------------------------------------- /src/reset.d.ts: -------------------------------------------------------------------------------- 1 | // Do not add any other lines of code to this file! 
2 | import "@total-typescript/ts-reset" 3 | import "@total-typescript/ts-reset/dom" 4 | -------------------------------------------------------------------------------- /src/types.ts: -------------------------------------------------------------------------------- 1 | import type { components } from "./generated-types/openai-types.ts" 2 | // deno-lint-ignore-file no-namespace 3 | import type { Logger } from "./log.ts" 4 | 5 | export declare namespace OpenAI { 6 | namespace Chat { 7 | type ChatCompletionCreateParams = components["schemas"]["CreateChatCompletionRequest"] 8 | type ChatCompletionChunk = components["schemas"]["CreateChatCompletionStreamResponse"] 9 | type ChatCompletion = components["schemas"]["CreateChatCompletionResponse"] 10 | type ChatCompletionMessageParam = components["schemas"]["ChatCompletionRequestMessage"] 11 | type CreateEmbeddingRequest = components["schemas"]["CreateEmbeddingRequest"] 12 | type FunctionObject = components["schemas"]["FunctionObject"] 13 | } 14 | namespace Models { 15 | type ModelsPage = components["schemas"]["ListModelsResponse"] 16 | type Model = components["schemas"]["Model"] 17 | } 18 | namespace Embeddings { 19 | type EmbeddingCreateParams = components["schemas"]["CreateEmbeddingRequest"] 20 | type CreateEmbeddingResponse = components["schemas"]["CreateEmbeddingResponse"] 21 | } 22 | } 23 | 24 | // export type { OpenAI } from "https://deno.land/x/openai@v4.28.0/mod.ts" 25 | 26 | declare global { 27 | interface Request { 28 | logger: Logger | undefined 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /src/utils.ts: -------------------------------------------------------------------------------- 1 | import type { Content, GenerateContentRequest, JsonSchema, Part } from "./gemini-api-client/types.ts" 2 | import type { Any } from "./log.ts" 3 | import type { OpenAI } from "./types.ts" 4 | 5 | export interface ApiParam { 6 | apikey: string 7 | useBeta: boolean 8 | } 9 | 10 | export function getToken(headers: Iterable<[string, string]>): ApiParam | null { 11 | for (const [k, v] of headers) { 12 | if (k.toLowerCase() !== "authorization") continue 13 | 14 | const rawApikey = v.substring(v.indexOf(" ") + 1) 15 | 16 | if (!rawApikey.includes("#")) { 17 | return { 18 | apikey: rawApikey, 19 | useBeta: false, 20 | } 21 | } 22 | 23 | // todo read config from apikey 24 | const apikey = rawApikey.substring(0, rawApikey.indexOf("#")) 25 | const params = new URLSearchParams(rawApikey.substring(rawApikey.indexOf("#") + 1)) 26 | return { 27 | apikey, 28 | useBeta: params.has("useBeta"), 29 | } 30 | } 31 | return null 32 | } 33 | 34 | function parseBase64(base64: string): Part { 35 | if (!base64.startsWith("data:")) { 36 | return { text: "" } 37 | } 38 | 39 | const [m, data, ..._arr] = base64.split(",") 40 | const mimeType = m.match(/:(?.*?);/)?.groups?.mime ?? "img/png" 41 | return { 42 | inlineData: { 43 | mimeType, 44 | data, 45 | }, 46 | } 47 | } 48 | 49 | export function openAiMessageToGeminiMessage(messages: OpenAI.Chat.ChatCompletionMessageParam[]): Content[] { 50 | const result: Content[] = messages.flatMap(({ role, content }) => { 51 | if (role === "system") { 52 | return [ 53 | { 54 | role: "user", 55 | parts: typeof content !== "string" ? content : [{ text: content }], 56 | }, 57 | ] satisfies Content[] as Content[] 58 | } 59 | const parts: Part[] = 60 | content == null || typeof content === "string" 61 | ? [{ text: content?.toString() ?? 
"" }] 62 | : content.map((item) => { 63 | if (item.type === "text") return { text: item.text } 64 | if (item.type === "image_url") return parseBase64(item.image_url.url) 65 | return { text: "OK" } 66 | }) 67 | return [{ role: "user" === role ? "user" : "model", parts: parts }] 68 | }) 69 | 70 | return result 71 | } 72 | 73 | export function genModel(req: OpenAI.Chat.ChatCompletionCreateParams): [GeminiModel, GenerateContentRequest] { 74 | const model: GeminiModel = GeminiModel.modelMapping(req.model) 75 | 76 | let functions: OpenAI.Chat.FunctionObject[] = 77 | req.tools?.filter((it) => it.type === "function")?.map((it) => it.function) ?? [] 78 | 79 | functions = functions.concat((req.functions ?? []).map((it) => ({ strict: null, ...it }))) 80 | 81 | const [responseMimeType, responseSchema] = (() => { 82 | switch (req.response_format?.type) { 83 | case "json_object": 84 | return ["application/json", undefined] 85 | case "json_schema": 86 | return ["application/json", req.response_format.json_schema.schema satisfies JsonSchema | undefined] 87 | case "text": 88 | return ["text/plain", undefined] 89 | default: 90 | return [undefined, undefined] 91 | } 92 | })() 93 | 94 | const generateContentRequest: GenerateContentRequest = { 95 | contents: openAiMessageToGeminiMessage(req.messages), 96 | generationConfig: { 97 | maxOutputTokens: req.max_completion_tokens ?? undefined, 98 | temperature: req.temperature ?? undefined, 99 | topP: req.top_p ?? undefined, 100 | responseMimeType: responseMimeType, 101 | responseSchema: responseSchema, 102 | thinkingConfig: !model.isThinkingModel() 103 | ? undefined 104 | : { 105 | includeThoughts: true, 106 | }, 107 | }, 108 | tools: 109 | functions.length === 0 110 | ? undefined 111 | : [ 112 | { 113 | functionDeclarations: functions, 114 | }, 115 | ], 116 | safetySettings: ( 117 | [ 118 | "HARM_CATEGORY_HATE_SPEECH", 119 | "HARM_CATEGORY_SEXUALLY_EXPLICIT", 120 | "HARM_CATEGORY_DANGEROUS_CONTENT", 121 | "HARM_CATEGORY_HARASSMENT", 122 | ] as const 123 | ).map((category) => ({ 124 | category, 125 | threshold: "BLOCK_NONE", 126 | })), 127 | } 128 | return [model, generateContentRequest] 129 | } 130 | export type KnownGeminiModel = 131 | | "gemini-1.5-pro-latest" 132 | | "gemini-1.5-flash-latest" 133 | | "gemini-1.5-flash-8b-latest" 134 | | "gemini-2.0-flash-exp" 135 | | "text-embedding-004" 136 | 137 | export type API_VERSION = "v1beta" | "v1" | "v1alpha" 138 | 139 | export class GeminiModel { 140 | static modelMapping(model: string | undefined): GeminiModel { 141 | const modelName: GeminiModelName | KnownGeminiModel = 142 | ModelMapping[model ?? ""] ?? GeminiModel.defaultModel(model ?? 
"") 143 | return new GeminiModel(modelName) 144 | } 145 | public readonly model: GeminiModelName 146 | constructor(model: GeminiModelName) { 147 | this.model = model 148 | } 149 | 150 | isThinkingModel(): boolean { 151 | return this.model.includes("thinking") 152 | } 153 | 154 | apiVersion(): API_VERSION { 155 | if (this.isThinkingModel()) { 156 | return "v1alpha" 157 | } 158 | return "v1beta" 159 | } 160 | 161 | toString(): string { 162 | return this.model 163 | } 164 | 165 | private static defaultModel(m: string): GeminiModelName { 166 | if (m.startsWith("gemini")) { 167 | return m as GeminiModelName 168 | } 169 | return "gemini-1.5-flash-latest" 170 | } 171 | } 172 | 173 | export type GeminiModelName = `gemini${string}` | "text-embedding-004" 174 | 175 | export const ModelMapping: Readonly> = { 176 | "gpt-3.5-turbo": "gemini-1.5-flash-8b-latest", 177 | "gpt-4": "gemini-1.5-pro-latest", 178 | "gpt-4o": "gemini-1.5-flash-latest", 179 | "gpt-4o-mini": "gemini-1.5-flash-8b-latest", 180 | "gpt-4-vision-preview": "gemini-1.5-flash-latest", 181 | "gpt-4-turbo": "gemini-1.5-pro-latest", 182 | "gpt-4-turbo-preview": "gemini-2.0-flash-exp", 183 | } 184 | 185 | export function getRuntimeKey() { 186 | const global = globalThis as typeof globalThis & Record 187 | if (global?.Deno !== undefined) { 188 | return "deno" 189 | } 190 | if (global?.Bun !== undefined) { 191 | return "bun" 192 | } 193 | if (typeof global?.WebSocketPair === "function") { 194 | return "workerd" 195 | } 196 | if (typeof global?.EdgeRuntime === "string") { 197 | return "edge-light" 198 | } 199 | if (global?.fastly !== undefined) { 200 | return "fastly" 201 | } 202 | if (global?.process?.release?.name === "node") { 203 | return "node" 204 | } 205 | return "other" 206 | } 207 | -------------------------------------------------------------------------------- /test/chat-completion_test.ts: -------------------------------------------------------------------------------- 1 | import { assertFalse } from "jsr:@std/assert" 2 | import { expect } from "jsr:@std/expect" 3 | import { afterEach, beforeEach, describe, it } from "jsr:@std/testing/bdd" 4 | import { EventSourceParserStream } from "eventsource-parser/stream" 5 | import { app } from "../src/app.ts" 6 | import type { OpenAI } from "../src/types.ts" 7 | import { MockFetch } from "./mock-fetch.ts" 8 | import { gemini_ok_resp } from "./test-data.ts" 9 | 10 | describe("openai to gemini test", () => { 11 | describe("success test", () => { 12 | const fetchMocker = new MockFetch() 13 | 14 | for (const [openaiModel, geminiModel] of [ 15 | ["gpt-3.5-turbo", "gemini-1.5-flash-8b-latest"], 16 | ["gpt-4", "gemini-1.5-pro-latest"], 17 | ["gpt-4o", "gemini-1.5-flash-latest"], 18 | ["gpt-4o-mini", "gemini-1.5-flash-8b-latest"], 19 | ["gpt-4-vision-preview", "gemini-1.5-flash-latest"], 20 | ["gpt-4-turbo", "gemini-1.5-pro-latest"], 21 | ["gpt-4-turbo-preview", "gemini-2.0-flash-exp"], 22 | ]) { 23 | beforeEach(() => { 24 | fetchMocker.mock( 25 | (req) => req.url.includes(`generativelanguage.googleapis.com/v1beta/models/${geminiModel}:generateContent`), 26 | () => Response.json(gemini_ok_resp), 27 | ) 28 | 29 | fetchMocker.mock( 30 | (req) => 31 | req.url.includes(`generativelanguage.googleapis.com/v1beta/models/${geminiModel}:streamGenerateContent`), 32 | () => new Response(`data: ${JSON.stringify(gemini_ok_resp)}\n\n`), 33 | ) 34 | }) 35 | 36 | afterEach(() => { 37 | fetchMocker.restore() 38 | }) 39 | 40 | it(`no streaming test with ${openaiModel}`, async () => { 41 | const res = await app.fetch( 
42 | new Request("http://127.0.0.1/v1/chat/completions", {
43 | headers: {
44 | authorization: "Bearer fake-api-key",
45 | },
46 | method: "post",
47 | body: JSON.stringify({
48 | model: openaiModel,
49 | messages: [
50 | {
51 | role: "user",
52 | content: "Hello",
53 | },
54 | ],
55 | stream: false,
56 | temperature: 0.7,
57 | logprobs: null,
58 | logit_bias: null,
59 | frequency_penalty: null,
60 | presence_penalty: null,
61 | stop: null,
62 | top_p: 1,
63 | service_tier: "auto",
64 | n: 1,
65 | store: false,
66 | reasoning_effort: "medium",
67 | } satisfies OpenAI.Chat.ChatCompletionCreateParams),
68 | }),
69 | )
70 | 
71 | const resp = (await res.json()) as OpenAI.Chat.ChatCompletion
72 | console.log(resp)
73 | expect(resp.choices.map((it) => it.message.content).join("")).toEqual(
74 | "Hello there! How can I assist you today?",
75 | )
76 | })
77 | 
78 | it(`stream test with ${openaiModel}`, async () => {
79 | const res = await app.fetch(
80 | new Request("http://127.0.0.1/v1/chat/completions", {
81 | headers: {
82 | authorization: "Bearer fake-api-key",
83 | },
84 | method: "post",
85 | body: JSON.stringify({
86 | model: openaiModel,
87 | messages: [
88 | {
89 | role: "user",
90 | content: "Hello",
91 | },
92 | ],
93 | temperature: 0.7,
94 | stream: true,
95 | logprobs: null,
96 | logit_bias: null,
97 | frequency_penalty: null,
98 | presence_penalty: null,
99 | stop: null,
100 | top_p: 1,
101 | service_tier: "auto",
102 | n: 1,
103 | store: false,
104 | reasoning_effort: "medium",
105 | } satisfies OpenAI.Chat.ChatCompletionCreateParams),
106 | }),
107 | )
108 | 
109 | const body = res.body ?? throws(new Error("no body"))
110 | const values = body.pipeThrough(new TextDecoderStream()).pipeThrough(new EventSourceParserStream()).values()
111 | 
112 | for await (const e of values) {
113 | if (e.data === "[DONE]") return
114 | const data = JSON.parse(e.data) as OpenAI.Chat.ChatCompletionChunk
115 | assertFalse(false)
116 | assertFalse(data.choices.find((it) => it.finish_reason === "stop" && it.delta?.content))
117 | }
118 | })
119 | }
120 | })
121 | })
122 | 
123 | function throws(e: Error): never {
124 | throw e
125 | }
126 | 
--------------------------------------------------------------------------------
/test/get_token_test.ts:
--------------------------------------------------------------------------------
1 | import { assertEquals } from "jsr:@std/assert"
2 | import { getToken } from "../src/utils.ts"
3 | import type { ApiParam } from "../src/utils.ts"
4 | 
5 | Deno.test("get token_test", () => {
6 | function checkToken(rec: Record<string, string>, res: ApiParam | null) {
7 | assertEquals(getToken(Object.entries(rec)), res)
8 | }
9 | 
10 | checkToken({ authorization: "Bearer my_key" }, { apikey: "my_key", useBeta: false })
11 | checkToken({ other_key: "Bearer my_key" }, null)
12 | checkToken({ other_key: "my_key" }, null)
13 | checkToken({ authorization: "my_key" }, { apikey: "my_key", useBeta: false })
14 | checkToken({ Authorization: "my_key" }, { apikey: "my_key", useBeta: false })
15 | checkToken({ Authorization: "Bearer my_key#useBeta" }, { apikey: "my_key", useBeta: true })
16 | checkToken({ Authorization: "bearer my_key#useBeta" }, { apikey: "my_key", useBeta: true })
17 | checkToken({ Authorization: "bearer my_key#otherConfig" }, { apikey: "my_key", useBeta: false })
18 | })
19 | 
--------------------------------------------------------------------------------
/test/mock-fetch.ts:
--------------------------------------------------------------------------------
1 | export type RequestHandler = (req: Request) => Response | Promise<Response>
2 | export type RequestMatcher = (req: Request) => Promise<boolean> | boolean
3 | 
4 | const originalFetch = globalThis.fetch
5 | 
6 | export class MockFetch {
7 | private store: [RequestMatcher, RequestHandler][] = []
8 | 
9 | mock(request: RequestMatcher, response: RequestHandler) {
10 | const store = this.store
11 | store.push([request, response])
12 | 
13 | if (globalThis.fetch !== originalFetch) return
14 | 
15 | globalThis.fetch = async (input: string | URL | Request, init?: RequestInit) => {
16 | const originalRequest = new Request(input, init)
17 | 
18 | for (const [matcher, handler] of store) {
19 | if (await matcher(originalRequest.clone())) {
20 | return await handler(originalRequest.clone())
21 | }
22 | }
23 | 
24 | return originalFetch(input, init)
25 | }
26 | }
27 | 
28 | restore() {
29 | this.store.length = 0
30 | globalThis.fetch = originalFetch
31 | }
32 | }
33 | 
--------------------------------------------------------------------------------
/test/models_test.ts:
--------------------------------------------------------------------------------
1 | import { expect } from "jsr:@std/expect"
2 | import * as bdd from "jsr:@std/testing/bdd"
3 | import { app } from "../src/app.ts"
4 | 
5 | import { modelData } from "../src/openai/models.ts"
6 | import type { OpenAI } from "../src/types.ts"
7 | 
8 | bdd.describe("openai model api test", () => {
9 | bdd.it("models test", async () => {
10 | const res = await app.fetch(
11 | new Request("http://127.0.0.1/v1/models", {
12 | method: "GET",
13 | }),
14 | )
15 | 
16 | const { data: models } = (await res.json()) as OpenAI.Models.ModelsPage
17 | console.log(models)
18 | 
19 | expect(models).toStrictEqual(modelData)
20 | })
21 | bdd.it("models/:name test", async () => {
22 | const res = await app.fetch(
23 | new Request("http://127.0.0.1/v1/models/gpt-3.5-turbo", {
24 | method: "GET",
25 | }),
26 | )
27 | 
28 | const model = await res.json()
29 | 
30 | expect(model).toStrictEqual(modelData[0])
31 | })
32 | })
33 | 
--------------------------------------------------------------------------------
/test/test-data.ts:
--------------------------------------------------------------------------------
1 | export const gemini_ok_resp = {
2 | candidates: [
3 | {
4 | content: {
5 | parts: [
6 | {
7 | text: "Hello there! How can I assist you today?",
8 | },
9 | ],
10 | role: "model",
11 | },
12 | finishReason: "STOP",
13 | index: 0,
14 | safetyRatings: [
15 | {
16 | category: "HARM_CATEGORY_SEXUALLY_EXPLICIT",
17 | probability: "NEGLIGIBLE",
18 | },
19 | {
20 | category: "HARM_CATEGORY_HATE_SPEECH",
21 | probability: "NEGLIGIBLE",
22 | },
23 | {
24 | category: "HARM_CATEGORY_HARASSMENT",
25 | probability: "NEGLIGIBLE",
26 | },
27 | {
28 | category: "HARM_CATEGORY_DANGEROUS_CONTENT",
29 | probability: "NEGLIGIBLE",
30 | },
31 | ],
32 | },
33 | ],
34 | promptFeedback: {
35 | safetyRatings: [
36 | {
37 | category: "HARM_CATEGORY_SEXUALLY_EXPLICIT",
38 | probability: "NEGLIGIBLE",
39 | },
40 | {
41 | category: "HARM_CATEGORY_HATE_SPEECH",
42 | probability: "NEGLIGIBLE",
43 | },
44 | {
45 | category: "HARM_CATEGORY_HARASSMENT",
46 | probability: "NEGLIGIBLE",
47 | },
48 | {
49 | category: "HARM_CATEGORY_DANGEROUS_CONTENT",
50 | probability: "NEGLIGIBLE",
51 | },
52 | ],
53 | },
54 | }
55 | 
56 | export const gemini_500_error_resp = {
57 | error: {
58 | code: 500,
59 | message:
60 | "An internal error has occurred. 
Please retry or report in https://developers.generativeai.google/guide/troubleshooting", 61 | status: "INTERNAL", 62 | }, 63 | } 64 | 65 | export const gemini_400_error_resp = { 66 | error: { 67 | code: 400, 68 | message: "User location is not supported for the API use.", 69 | status: "FAILED_PRECONDITION", 70 | }, 71 | } 72 | 73 | export const gemini_400_apikey_error_resp = { 74 | error: { 75 | code: 400, 76 | message: "API Key not found. Please pass a valid API key.", 77 | status: "INVALID_ARGUMENT", 78 | details: [ 79 | { 80 | "@type": "type.googleapis.com/google.rpc.ErrorInfo", 81 | reason: "API_KEY_INVALID", 82 | domain: "googleapis.com", 83 | metadata: { 84 | service: "generativelanguage.googleapis.com", 85 | }, 86 | }, 87 | ], 88 | }, 89 | } 90 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "allowImportingTsExtensions": true, 4 | "noEmit": true, 5 | "esModuleInterop": true, 6 | "useUnknownInCatchVariables": false, 7 | "module": "Node16", 8 | "moduleResolution": "Node16", 9 | "lib": [ 10 | // 11 | "ESNext.Array", 12 | "DOM", 13 | "DOM.AsyncIterable", 14 | "DOM.Iterable", 15 | "ESNext", 16 | "ESNext.Disposable", 17 | "ESNext.AsyncIterable" 18 | ] 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /vercel.json: -------------------------------------------------------------------------------- 1 | { 2 | "rewrites": [ 3 | { 4 | "source": "/(.*)", 5 | "destination": "api/handler" 6 | } 7 | ] 8 | } 9 | -------------------------------------------------------------------------------- /zbpack.json: -------------------------------------------------------------------------------- 1 | { 2 | "serverless": true, 3 | "start_command": "node dist/main_node.mjs" 4 | } 5 | --------------------------------------------------------------------------------