├── .eslintignore
├── .github
│   ├── assets
│   │   ├── logo-dark-mode.svg
│   │   └── logo-light-mode.svg
│   └── workflows
│       └── ci.yaml
├── .gitignore
├── .npmrc
├── .prettierignore
├── .prettierrc
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── LICENSE
├── README.md
├── apps
│   ├── demo
│   │   ├── .eslintrc.json
│   │   ├── .gitignore
│   │   ├── README.md
│   │   ├── app
│   │   │   ├── api
│   │   │   │   └── chat
│   │   │   │       └── route.ts
│   │   │   ├── chat
│   │   │   │   └── page.tsx
│   │   │   ├── favicon.ico
│   │   │   ├── globals.css
│   │   │   ├── layout.tsx
│   │   │   ├── page.tsx
│   │   │   └── providers
│   │   │       └── LoadingProvider.tsx
│   │   ├── components
│   │   │   ├── LLMSelector.tsx
│   │   │   └── Message.tsx
│   │   ├── next.config.js
│   │   ├── package.json
│   │   ├── postcss.config.js
│   │   ├── public
│   │   │   ├── next.svg
│   │   │   └── vercel.svg
│   │   ├── tailwind.config.ts
│   │   ├── tsconfig.json
│   │   └── utils
│   │       └── types.ts
│   └── docs
│       ├── .github
│       │   └── screenshot.png
│       ├── .gitignore
│       ├── LICENSE
│       ├── README.md
│       ├── assets
│       │   └── logo.svg
│       ├── components
│       │   ├── DynamicCodeExample.tsx
│       │   ├── counters.module.css
│       │   └── counters.tsx
│       ├── next-env.d.ts
│       ├── next.config.js
│       ├── package.json
│       ├── pages
│       │   ├── _app.tsx
│       │   ├── _meta.json
│       │   ├── index.mdx
│       │   └── providers-and-models
│       │       ├── _meta.json
│       │       ├── anthropic.mdx
│       │       ├── azure-openai.mdx
│       │       └── openai.mdx
│       ├── pnpm-lock.yaml
│       ├── postcss.config.js
│       ├── public
│       │   └── favicon
│       │       ├── android-chrome-192x192.png
│       │       ├── android-chrome-512x512.png
│       │       ├── apple-touch-icon.png
│       │       ├── favicon-16x16.png
│       │       ├── favicon-32x32.png
│       │       ├── favicon.ico
│       │       └── site.webmanifest
│       ├── styles
│       │   └── globals.css
│       ├── tailwind.config.js
│       ├── theme.config.tsx
│       └── tsconfig.json
├── package-lock.json
├── package.json
├── packages
│   ├── eslint-config-custom
│   │   ├── README.md
│   │   ├── library.js
│   │   ├── next.js
│   │   ├── package.json
│   │   └── react-internal.js
│   ├── llm-repo
│   │   ├── images
│   │   │   ├── anthropic.png
│   │   │   ├── azure.png
│   │   │   └── openai.png
│   │   ├── index.ts
│   │   ├── logos.ts
│   │   ├── package.json
│   │   └── tsconfig.json
│   ├── tsconfig
│   │   ├── base.json
│   │   ├── nextjs.json
│   │   ├── package.json
│   │   └── react-library.json
│   └── unillm-node
│       ├── .eslintrc.json
│       ├── index.ts
│       ├── package.json
│       ├── providers
│       │   ├── anthropic.ts
│       │   ├── azure-openai.ts
│       │   ├── baseProvider.ts
│       │   └── openai.ts
│       ├── rollup.config.js
│       ├── tests
│       │   ├── anthropic.test.ts
│       │   ├── azure-openai.test.ts
│       │   ├── openai.test.ts
│       │   └── utils
│       │       ├── test-data.util.ts
│       │       └── validation.util.ts
│       ├── tsconfig.json
│       ├── turbo
│       │   └── generators
│       │       ├── config.ts
│       │       └── templates
│       │           └── component.hbs
│       ├── utils
│       │   ├── UnifiedErrorResponse.ts
│       │   ├── properties.ts
│       │   └── types.ts
│       └── vite.config.ts
├── tsconfig.json
└── turbo.json

/.eslintignore:
--------------------------------------------------------------------------------
1 | node_modules
2 | dist
3 | apps/docs
--------------------------------------------------------------------------------
/.github/assets/logo-dark-mode.svg:
--------------------------------------------------------------------------------
[SVG markup (547 numbered lines) was not captured in this text dump; this file is the dark-mode UniLLM logo.]
--------------------------------------------------------------------------------
/.github/assets/logo-light-mode.svg:
--------------------------------------------------------------------------------
[SVG markup (548 numbered lines) was not captured in this text dump; this file is the light-mode UniLLM logo.]
--------------------------------------------------------------------------------
/.github/workflows/ci.yaml:
--------------------------------------------------------------------------------
1 | name: Continuous Integration
2 | 
3 | on:
4 |   workflow_dispatch:
5 |   pull_request:
6 |     branches:
7 |       - "*"
8 |     paths-ignore:
9 |       - "**/*.md"
10 |       - "docs"
11 |   push:
12 |     branches:
13 |       - "*"
14 |     paths-ignore:
15 |       - "**/*.md"
16 |       - "docs"
17 | 
18 | jobs:
19 |   ci:
20 |     name: Continuous Integration
21 |     runs-on: ubuntu-latest
22 |     permissions:
23 |       contents: read
24 |       packages: write
25 |     steps:
26 |       - uses: actions/checkout@v3
27 |         with:
28 |           fetch-depth: 0
29 | 
30 |       - name: Cache node modules
31 |         id: cache
32 |         uses: actions/cache@v3
33 |         with:
34 |           path: |
35 |             node_modules
36 |           key: cache-node-modules-${{ hashFiles('**/package-lock.json') }}
37 | 
38 |       - uses: actions/setup-node@v3
39 |         if: steps.cache.outputs.cache-hit != 'true'
40 |         with:
41 |           node-version: 18.x
42 | 
43 |       - name: Install Dependencies
44 |         if: steps.cache.outputs.cache-hit != 'true'
45 |         run: npm ci
46 | 
47 |       - name: Check Formatting
48 |         run: npx prettier .
--check 49 | 50 | - name: Lint 51 | run: npx turbo run lint 52 | 53 | - name: Test 54 | run: npx turbo run test 55 | env: 56 | OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} 57 | ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }} 58 | AZURE_OPENAI_API_KEY: ${{ secrets.AZURE_OPENAI_API_KEY }} 59 | AZURE_OPENAI_ENDPOINT: ${{ secrets.AZURE_OPENAI_ENDPOINT }} 60 | AZURE_OPENAI_DEPLOYMENT: ${{ secrets.AZURE_OPENAI_DEPLOYMENT }} 61 | 62 | - name: Build 63 | run: npx turbo run build 64 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files. 2 | 3 | # dependencies 4 | node_modules 5 | .pnp 6 | .pnp.js 7 | 8 | # testing 9 | coverage 10 | 11 | # next.js 12 | .next/ 13 | out/ 14 | build 15 | dist 16 | 17 | # misc 18 | .DS_Store 19 | *.pem 20 | 21 | # debug 22 | npm-debug.log* 23 | yarn-debug.log* 24 | yarn-error.log* 25 | 26 | # local env files 27 | .env 28 | .env.local 29 | .env.development.local 30 | .env.test.local 31 | .env.production.local 32 | 33 | # turbo 34 | .turbo 35 | 36 | # vercel 37 | .vercel 38 | -------------------------------------------------------------------------------- /.npmrc: -------------------------------------------------------------------------------- 1 | auto-install-peers = true 2 | -------------------------------------------------------------------------------- /.prettierignore: -------------------------------------------------------------------------------- 1 | # Add files here to ignore them from prettier formatting 2 | **/.next 3 | apps/docs 4 | **/dist -------------------------------------------------------------------------------- /.prettierrc: -------------------------------------------------------------------------------- 1 | { 2 | "singleQuote": false 3 | } 4 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | We as members, contributors, and leaders pledge to make participation in our 6 | community a harassment-free experience for everyone, regardless of age, body 7 | size, visible or invisible disability, ethnicity, sex characteristics, gender 8 | identity and expression, level of experience, education, socio-economic status, 9 | nationality, personal appearance, race, religion, or sexual identity 10 | and orientation. 11 | 12 | We pledge to act and interact in ways that contribute to an open, welcoming, 13 | diverse, inclusive, and healthy community. 
14 | 15 | ## Our Standards 16 | 17 | Examples of behavior that contributes to a positive environment for our 18 | community include: 19 | 20 | - Demonstrating empathy and kindness toward other people 21 | - Being respectful of differing opinions, viewpoints, and experiences 22 | - Giving and gracefully accepting constructive feedback 23 | - Accepting responsibility and apologizing to those affected by our mistakes, 24 | and learning from the experience 25 | - Focusing on what is best not just for us as individuals, but for the 26 | overall community 27 | 28 | Examples of unacceptable behavior include: 29 | 30 | - The use of sexualized language or imagery, and sexual attention or 31 | advances of any kind 32 | - Trolling, insulting or derogatory comments, and personal or political attacks 33 | - Public or private harassment 34 | - Publishing others' private information, such as a physical or email 35 | address, without their explicit permission 36 | - Other conduct which could reasonably be considered inappropriate in a 37 | professional setting 38 | 39 | ## Enforcement Responsibilities 40 | 41 | Community leaders are responsible for clarifying and enforcing our standards of 42 | acceptable behavior and will take appropriate and fair corrective action in 43 | response to any behavior that they deem inappropriate, threatening, offensive, 44 | or harmful. 45 | 46 | Community leaders have the right and responsibility to remove, edit, or reject 47 | comments, commits, code, wiki edits, issues, and other contributions that are 48 | not aligned to this Code of Conduct, and will communicate reasons for moderation 49 | decisions when appropriate. 50 | 51 | ## Scope 52 | 53 | This Code of Conduct applies within all community spaces, and also applies when 54 | an individual is officially representing the community in public spaces. 55 | Examples of representing our community include using an official e-mail address, 56 | posting via an official social media account, or acting as an appointed 57 | representative at an online or offline event. 58 | 59 | ## Enforcement 60 | 61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 62 | reported to the community leaders responsible for enforcement at 63 | hello@pezzo.ai. 64 | All complaints will be reviewed and investigated promptly and fairly. 65 | 66 | All community leaders are obligated to respect the privacy and security of the 67 | reporter of any incident. 68 | 69 | ## Enforcement Guidelines 70 | 71 | Community leaders will follow these Community Impact Guidelines in determining 72 | the consequences for any action they deem in violation of this Code of Conduct: 73 | 74 | ### 1. Correction 75 | 76 | **Community Impact**: Use of inappropriate language or other behavior deemed 77 | unprofessional or unwelcome in the community. 78 | 79 | **Consequence**: A private, written warning from community leaders, providing 80 | clarity around the nature of the violation and an explanation of why the 81 | behavior was inappropriate. A public apology may be requested. 82 | 83 | ### 2. Warning 84 | 85 | **Community Impact**: A violation through a single incident or series 86 | of actions. 87 | 88 | **Consequence**: A warning with consequences for continued behavior. No 89 | interaction with the people involved, including unsolicited interaction with 90 | those enforcing the Code of Conduct, for a specified period of time. This 91 | includes avoiding interactions in community spaces as well as external channels 92 | like social media. 
Violating these terms may lead to a temporary or
93 | permanent ban.
94 | 
95 | ### 3. Temporary Ban
96 | 
97 | **Community Impact**: A serious violation of community standards, including
98 | sustained inappropriate behavior.
99 | 
100 | **Consequence**: A temporary ban from any sort of interaction or public
101 | communication with the community for a specified period of time. No public or
102 | private interaction with the people involved, including unsolicited interaction
103 | with those enforcing the Code of Conduct, is allowed during this period.
104 | Violating these terms may lead to a permanent ban.
105 | 
106 | ### 4. Permanent Ban
107 | 
108 | **Community Impact**: Demonstrating a pattern of violation of community
109 | standards, including sustained inappropriate behavior, harassment of an
110 | individual, or aggression toward or disparagement of classes of individuals.
111 | 
112 | **Consequence**: A permanent ban from any sort of public interaction within
113 | the community.
114 | 
115 | ## Attribution
116 | 
117 | This Code of Conduct is adapted from the [Contributor Covenant][homepage],
118 | version 2.0.
119 | 
120 | [homepage]: https://www.contributor-covenant.org
121 | 
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing
2 | 
3 | We open sourced UniLLM because we believe in the power of community. We believe you can help make UniLLM better!
4 | We are excited to see what you will build with UniLLM, and we look forward to your contributions. We want to make contributing to this project as easy and transparent as possible, whether it's features, bug fixes, documentation updates, guides, examples, or more.
5 | 
6 | ## How can I contribute?
7 | 
8 | Ready to contribute but seeking guidance? We have several avenues to assist you. The next section explains the kinds of contributions we appreciate and how to jump in. Reach out directly to the UniLLM team on [Discord](https://pezzo.cc/discord) for immediate assistance! Alternatively, you're welcome to raise an issue, and one of our dedicated maintainers will promptly steer you in the right direction!
9 | 
10 | ## Found a bug?
11 | 
12 | If you find a bug in the source code, you can help us by [creating an issue](https://github.com/pezzolabs/unillm/issues/new) in our GitHub repository. Even better, you can submit a Pull Request with a fix.
13 | 
14 | ## Missing a feature?
15 | 
16 | So, you've got an awesome feature in mind? Throw it over to us by [creating an issue](https://github.com/pezzolabs/unillm/issues/new) on our GitHub repo.
17 | 
18 | Planning to code a feature yourself? We love the enthusiasm, but hang on: it's always good to have a little chinwag with us before you burn the midnight oil. Unfortunately, not every feature will fit into our plans.
19 | 
20 | - Dreaming big? Kick off by opening an issue and sketching out your ideas. It helps us all stay on the same page, avoids duplicated work, and ensures your hard work gels well with the project.
21 | - Cooking up something small? Just craft it and [shoot it straight over as a Pull Request](#submit-pr).
22 | 
23 | ## What do you need to know to help?
24 | 
25 | If you want to help out with a code contribution, our project uses the following stack:
26 | 
27 | - TypeScript
28 | - Node.js
29 | - Various APIs/SDKs of LLM providers
30 | 
31 | If you don't feel ready to make a code contribution yet, no problem!
You can also improve our documentation.
32 | 
33 | # How do I make a code contribution?
34 | 
35 | ## Good first issues
36 | 
37 | Are you new to open source contribution? Wondering how contributions work in our project? Here's a quick rundown.
38 | 
39 | Find an issue that you're interested in addressing, or a feature that you'd like to add.
40 | You can use [this view](https://github.com/pezzolabs/unillm/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22), which helps new contributors find easy gateways into our project.
41 | 
42 | ## Step 1: Make a fork
43 | 
44 | Fork the UniLLM repository to your GitHub organization/account. This means that you'll have a copy of the repository under _your-GitHub-username/repository-name_.
45 | 
46 | ## Step 2: Clone the repository to your local machine
47 | 
48 | ```bash
49 | git clone https://github.com/{your-GitHub-username}/unillm.git
50 | ```
51 | 
52 | 
53 | ## Step 3: Prepare the development environment
54 | 
55 | Set up and run the development environment on your local machine:
56 | 
57 | **BEFORE** you run the following steps, make sure:
58 | 
59 | 1. You have TypeScript installed locally on your machine: `npm install -g typescript`
60 | 2. You are using Node version ^18.16.0 || ^14.0.0
61 | 3. You are using npm version ^8.1.0 || ^7.3.0
62 | 4. You have `docker` installed and running on your machine
63 | 
64 | ```shell
65 | cd unillm
66 | npm install
67 | ```
68 | 
69 | ## Step 4: Create a branch
70 | 
71 | Create a new branch for your changes.
72 | In order to keep branch names uniform and easy to understand, please use the following conventions for branch naming.
73 | Generally speaking, it is a good idea to add a group/type prefix to a branch.
74 | Here is a list of good examples:
75 | 
76 | - for docs changes: docs/{ISSUE_NUMBER}-{CUSTOM_NAME}
77 | - for new features: feat/{ISSUE_NUMBER}-{CUSTOM_NAME}
78 | - for bug fixes: fix/{ISSUE_NUMBER}-{CUSTOM_NAME}
79 | 
80 | ```bash
81 | git checkout -b branch-name-here
82 | ```
83 | 
84 | ## Step 5: Make your changes
85 | 
86 | Update the code with your bug fix or new feature.
87 | 
88 | ## Step 6: Add the changes that are ready to be committed
89 | 
90 | Stage the changes that are ready to be committed:
91 | 
92 | ```bash
93 | git add .
94 | ```
95 | 
96 | ## Step 7: Commit the changes (Git)
97 | 
98 | Commit the changes with a short message. (See below for more details on how we structure our commit messages.)
99 | 
100 | ```bash
101 | git commit -m "<type>(<package>): <subject>"
102 | ```
103 | 
104 | ## Step 8: Push the changes to the remote repository
105 | 
106 | Push the changes to the remote repository using:
107 | 
108 | ```bash
109 | git push origin branch-name-here
110 | ```
111 | 
112 | ## Step 9: Create Pull Request
113 | 
114 | In GitHub, do the following to submit a pull request to the upstream repository:
115 | 
116 | 1. Give the pull request a title and a short description of the changes made. Also include the issue or bug number associated with your change. Explain the changes that you made, any issues you think exist with the pull request, and any questions you have for the maintainer.
117 | 
118 | Remember, it's okay if your pull request is not perfect (no pull request ever is). The reviewer will be able to help you fix any problems and improve it!
119 | 
120 | 2. Wait for the pull request to be reviewed by a maintainer.
121 | 
122 | 3. Make changes to the pull request if the reviewing maintainer recommends them.
123 | 
124 | Celebrate your success after your pull request is merged :-)
125 | 
126 | ## Git Commit Messages
127 | 
128 | We structure our commit messages like this:
129 | 
130 | ```
131 | <type>(<package>): <subject>
132 | ```
133 | 
134 | Examples:
135 | 
136 | ```
137 | fix(docs): fix title
138 | fix(unillm-node): better error handling
139 | ```
140 | 
141 | ### Types:
142 | 
143 | - **feat**: A new feature
144 | - **fix**: A bug fix
145 | - **docs**: Changes to the documentation
146 | - **style**: Changes that do not affect the meaning of the code (white-space, formatting, missing semi-colons, etc.)
147 | - **refactor**: A code change that neither fixes a bug nor adds a feature
148 | - **perf**: A code change that improves performance
149 | - **test**: Adding missing or correcting existing tests
150 | - **chore**: Changes to the build process or auxiliary tools and libraries such as documentation generation
151 | 
152 | ### Packages:
153 | 
154 | - **`unillm-node`**: Node.js SDK for UniLLM
155 | - **`apps/demo`**: Demo application
156 | - **`apps/docs`**: UniLLM's official documentation
157 | 
158 | ## Code of conduct
159 | 
160 | Please note that this project is released with a Contributor Code of Conduct. By participating in this project you agree to abide by its terms.
161 | 
162 | [Code of Conduct](https://github.com/pezzolabs/unillm/blob/main/CODE_OF_CONDUCT.md)
163 | 
164 | Our Code of Conduct means that you are responsible for treating everyone on the project with respect and courtesy.
165 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 | 
3 | Copyright (c) 2023 Pezzo, Inc.
4 | 
5 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
6 | 
7 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
8 | 
9 | THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
[README header markup (lines 1-26) was stripped during text extraction. It is a centered block containing the UniLLM logo, the tagline "UniLLM allows you to call any LLM using the OpenAI API, with 100% type safety.", and project badges (Contributor Covenant, License).]
27 | 
28 | # Benefits
29 | 
30 | - ✨ Integrate with any provider and model using the OpenAI API
31 | - 💬 Consistent chatCompletion responses and logs across all models and providers
32 | - 💯 Type safety across all providers and models
33 | - 🔁 Seamlessly switch between LLMs without rewriting your codebase
34 | - ✅ If you write tests for your service, you only need to test it once
35 | - 🔜 (Coming Soon) Request caching and rate limiting
36 | - 🔜 (Coming Soon) Cost monitoring and alerting
37 | 
38 | # Usage
39 | 
40 | ## [✨ Check our interactive documentation ✨](https://docs.unillm.ai)
41 | 
42 | ## 💬 Chat Completions
43 | 
44 | With UniLLM, you can use chat completions even with providers/models that don't natively support them (e.g. Anthropic).
45 | 
46 | ```bash
47 | npm i unillm
48 | ```
49 | 
50 | ```ts
51 | import { UniLLM } from 'unillm';
52 | 
53 | const unillm = new UniLLM();
54 | 
55 | // OpenAI
56 | const response = await unillm.createChatCompletion("openai/gpt-3.5-turbo", { messages: ... });
57 | const response = await unillm.createChatCompletion("openai/gpt-4", { messages: ... });
58 | 
59 | // Anthropic
60 | const response = await unillm.createChatCompletion("anthropic/claude-2", { messages: ... });
61 | const response = await unillm.createChatCompletion("anthropic/claude-instant-1", { messages: ... });
62 | 
63 | // Azure OpenAI
64 | const response = await unillm.createChatCompletion("azure/openai/<deployment-name>", { messages: ... });
65 | 
66 | // More coming soon!
67 | ```
68 | 
69 | Want to see more examples? Check out the **[interactive docs](https://docs.unillm.ai)**.
70 | 
71 | ## ⚡️ Streaming
72 | 
73 | To enable streaming, simply provide `stream: true` in the options object. Here is an example:
74 | 
75 | ```ts
76 | const response = await unillm.createChatCompletion("openai/gpt-3.5-turbo", {
77 |   messages: ...,
78 |   stream: true
79 | });
80 | ```
81 | 
82 | Want to see more examples? Check out the **[interactive docs](https://docs.unillm.ai)**.
83 | 
84 | # Contributing
85 | 
86 | We welcome contributions from the community! Please feel free to submit pull requests or create issues for bugs or feature suggestions.
87 | 
88 | If you want to contribute but are not sure how, join our [Discord](https://discord.gg/XcEVPePwn2) and we'll be happy to help you out!
89 | 
90 | Please check out [CONTRIBUTING.md](CONTRIBUTING.md) before contributing.
91 | 
92 | # License
93 | 
94 | This repository's source code is available under the [MIT license](LICENSE).
95 | 
--------------------------------------------------------------------------------
/apps/demo/.eslintrc.json:
--------------------------------------------------------------------------------
1 | {
2 |   "extends": "next/core-web-vitals"
3 | }
4 | 
--------------------------------------------------------------------------------
/apps/demo/.gitignore:
--------------------------------------------------------------------------------
1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
2 | 3 | # dependencies 4 | /node_modules 5 | /.pnp 6 | .pnp.js 7 | 8 | # testing 9 | /coverage 10 | 11 | # next.js 12 | /.next/ 13 | /out/ 14 | 15 | # production 16 | /build 17 | 18 | # misc 19 | .DS_Store 20 | *.pem 21 | 22 | # debug 23 | npm-debug.log* 24 | yarn-debug.log* 25 | yarn-error.log* 26 | 27 | # local env files 28 | .env*.local 29 | 30 | # vercel 31 | .vercel 32 | 33 | # typescript 34 | *.tsbuildinfo 35 | next-env.d.ts 36 | -------------------------------------------------------------------------------- /apps/demo/README.md: -------------------------------------------------------------------------------- 1 | This is a [Next.js](https://nextjs.org/) project bootstrapped with [`create-next-app`](https://github.com/vercel/next.js/tree/canary/packages/create-next-app). 2 | 3 | ## Getting Started 4 | 5 | First, run the development server: 6 | 7 | ```bash 8 | npm run dev 9 | # or 10 | yarn dev 11 | # or 12 | pnpm dev 13 | # or 14 | bun dev 15 | ``` 16 | 17 | Open [http://localhost:3000](http://localhost:3000) with your browser to see the result. 18 | 19 | You can start editing the page by modifying `app/page.tsx`. The page auto-updates as you edit the file. 20 | 21 | This project uses [`next/font`](https://nextjs.org/docs/basic-features/font-optimization) to automatically optimize and load Inter, a custom Google Font. 22 | 23 | ## Learn More 24 | 25 | To learn more about Next.js, take a look at the following resources: 26 | 27 | - [Next.js Documentation](https://nextjs.org/docs) - learn about Next.js features and API. 28 | - [Learn Next.js](https://nextjs.org/learn) - an interactive Next.js tutorial. 29 | 30 | You can check out [the Next.js GitHub repository](https://github.com/vercel/next.js/) - your feedback and contributions are welcome! 31 | 32 | ## Deploy on Vercel 33 | 34 | The easiest way to deploy your Next.js app is to use the [Vercel Platform](https://vercel.com/new?utm_medium=default-template&filter=next.js&utm_source=create-next-app&utm_campaign=create-next-app-readme) from the creators of Next.js. 35 | 36 | Check out our [Next.js deployment documentation](https://nextjs.org/docs/deployment) for more details. 
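
## Calling the chat API directly

The chat UI talks to a single streaming endpoint. For reference, here is a minimal sketch of calling it from any TypeScript client; it assumes the dev server above is running on http://localhost:3000, and the request body shape (`{ messages, llm }`) mirrors `app/api/chat/route.ts`:

```ts
// Sketch only: the route streams plain text back, so read it incrementally.
const res = await fetch("http://localhost:3000/api/chat", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    llm: "openai/gpt-3.5-turbo", // any key from utils/types.ts
    messages: [{ role: "user", content: "Hello!" }],
  }),
});

const reader = res.body!.getReader();
const decoder = new TextDecoder();
while (true) {
  const { done, value } = await reader.read();
  if (done) break;
  console.log(decoder.decode(value)); // print each streamed chunk as it arrives
}
```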
37 | 
--------------------------------------------------------------------------------
/apps/demo/app/api/chat/route.ts:
--------------------------------------------------------------------------------
1 | import { OpenAIStream, StreamingTextResponse } from "ai";
2 | import { UniLLM } from "unillm";
3 | 
4 | export async function POST(req: Request) {
5 |   const { messages, llm } = await req.json(); // chat history plus the selected "provider/model" string
6 | 
7 |   const unillm = new UniLLM(); // provider API keys are read from the environment
8 | 
9 |   const response = await unillm.createChatCompletion(llm, {
10 |     temperature: 0,
11 |     max_tokens: 500,
12 |     messages: [...messages],
13 |     stream: true, // stream tokens back to the client as they arrive
14 |   });
15 | 
16 |   const stream = OpenAIStream(response); // responses are OpenAI-shaped regardless of provider
17 |   return new StreamingTextResponse(stream);
18 | }
19 | 
--------------------------------------------------------------------------------
/apps/demo/app/chat/page.tsx:
--------------------------------------------------------------------------------
1 | "use client";
2 | 
3 | import { LLMSelector } from "@/components/LLMSelector";
4 | import { LLMs } from "@/utils/types";
5 | import { Message } from "../../components/Message";
6 | import { ChatBubbleIcon, PaperPlaneIcon } from "@radix-ui/react-icons";
7 | import { Heading, IconButton, TextField } from "@radix-ui/themes";
8 | import { useChat } from "ai/react";
9 | import { useEffect, useRef, useState } from "react";
10 | 
11 | export default function ChatPage() {
12 |   const defaultLLM = Object.keys(LLMs)[0];
13 |   const [llm, setLLM] = useState(defaultLLM);
14 | 
15 |   const body = {
16 |     llm,
17 |   };
18 | 
19 |   const { messages, input, handleInputChange, handleSubmit } = useChat({
20 |     api: "/api/chat",
21 |     initialMessages: [],
22 |     body,
23 |   });
24 |   const form = useRef<HTMLFormElement>(null);
25 |   const messagesEndRef = useRef<HTMLDivElement>(null);
26 | 
27 |   useEffect(() => {
28 |     if (messagesEndRef.current) {
29 |       messagesEndRef.current.scrollTop = messagesEndRef.current.scrollHeight;
30 |     }
31 |   }, [messages]);
32 | 
33 |   return (
34 | 
35 | [The chat page's JSX (lines 35-81) was stripped during text extraction. Surviving fragments show an LLMSelector bound via onChange={(value) => setLLM(value)}, a message list rendered with {messages.map((m, index) => ( ... ))}, and the chat input form.]
82 | ); 83 | } 84 | -------------------------------------------------------------------------------- /apps/demo/app/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pezzolabs/UniLLM/b129da5c077000baeb2c66e3f3ceafdc9ae3f22a/apps/demo/app/favicon.ico -------------------------------------------------------------------------------- /apps/demo/app/globals.css: -------------------------------------------------------------------------------- 1 | @tailwind base; 2 | @tailwind components; 3 | @tailwind utilities; 4 | 5 | @media (prefers-color-scheme: dark) { 6 | :root { 7 | --foreground-rgb: 255, 255, 255; 8 | --background-start-rgb: 0, 0, 0; 9 | --background-end-rgb: 0, 0, 0; 10 | } 11 | } 12 | 13 | body { 14 | color: rgb(var(--foreground-rgb)); 15 | background: linear-gradient( 16 | to bottom, 17 | transparent, 18 | rgb(var(--background-end-rgb)) 19 | ) 20 | rgb(var(--background-start-rgb)); 21 | } 22 | 23 | html { 24 | @apply h-full; 25 | } 26 | body { 27 | @apply h-full; 28 | } 29 | div#__next { 30 | @apply h-full; 31 | } 32 | main { 33 | @apply h-full; 34 | } 35 | -------------------------------------------------------------------------------- /apps/demo/app/layout.tsx: -------------------------------------------------------------------------------- 1 | import "./globals.css"; 2 | import type { Metadata } from "next"; 3 | import { Inter } from "next/font/google"; 4 | import "@radix-ui/themes/styles.css"; 5 | import { Theme } from "@radix-ui/themes"; 6 | import cn from "classnames"; 7 | import { LoadingProvider } from "./providers/LoadingProvider"; 8 | 9 | const inter = Inter({ subsets: ["latin"] }); 10 | 11 | export const metadata: Metadata = { 12 | title: "ChatPDF by Ariel Weinberger", 13 | description: "Chat with your PDF files!", 14 | }; 15 | 16 | export default function RootLayout({ 17 | children, 18 | }: { 19 | children: React.ReactNode; 20 | }) { 21 | return ( 22 | 23 | 29 | 30 | 31 |
32 | [The layout's JSX (lines 32-45) was stripped during text extraction; a surviving fragment shows {children} rendered inside the page shell.]
46 | 47 | 48 | ); 49 | } 50 | -------------------------------------------------------------------------------- /apps/demo/app/page.tsx: -------------------------------------------------------------------------------- 1 | "use client"; 2 | 3 | import { useRouter } from "next/navigation"; 4 | 5 | export default function Home() { 6 | const router = useRouter(); 7 | router.push("/chat"); 8 | } 9 | -------------------------------------------------------------------------------- /apps/demo/app/providers/LoadingProvider.tsx: -------------------------------------------------------------------------------- 1 | "use client"; 2 | 3 | import { createContext, useContext, useState } from "react"; 4 | 5 | interface LoadingContext { 6 | isLoading: boolean; 7 | setIsLoading: (isLoading: boolean) => void; 8 | } 9 | 10 | const LoadingContext = createContext({ 11 | isLoading: false, 12 | setIsLoading: (isLoading: boolean) => {}, 13 | }); 14 | 15 | export const useLoading = () => useContext(LoadingContext); 16 | 17 | interface LoadingProviderProps { 18 | children: React.ReactNode; 19 | } 20 | 21 | export const LoadingProvider = ({ children }: LoadingProviderProps) => { 22 | const [isLoading, setIsLoading] = useState(false); 23 | 24 | return ( 25 | 31 | {children} 32 | 33 | ); 34 | }; 35 | -------------------------------------------------------------------------------- /apps/demo/components/LLMSelector.tsx: -------------------------------------------------------------------------------- 1 | import { LLMs } from "@/utils/types"; 2 | import { Select } from "@radix-ui/themes"; 3 | import React from "react"; 4 | 5 | const llms: { 6 | name: string; 7 | value: string; 8 | }[] = [ 9 | { 10 | name: "OpenAI GPT-3.5 Turbo", 11 | value: LLMs["openai/gpt-3.5-turbo"], 12 | }, 13 | { 14 | name: "OpenAI GPT-4", 15 | value: LLMs["openai/gpt-4"], 16 | }, 17 | { 18 | name: "Anthropic Claude-2", 19 | value: LLMs["anthropic/claude-2"], 20 | }, 21 | { 22 | name: "Azure OpenAI", 23 | value: LLMs["azure/openai/gpt35turbo"], 24 | }, 25 | ]; 26 | 27 | type Props = { 28 | value: string; 29 | onChange: (value: string) => void; 30 | }; 31 | 32 | export const LLMSelector = ({ onChange, value }: Props) => { 33 | return ( 34 | onChange(value)}> 35 | 36 | 37 | 38 | {llms.map((llm) => ( 39 | 40 | {llm.name} 41 | 42 | ))} 43 | 44 | 45 | 46 | ); 47 | }; 48 | -------------------------------------------------------------------------------- /apps/demo/components/Message.tsx: -------------------------------------------------------------------------------- 1 | import { Message as AIMessage } from "ai"; 2 | import cn from "classnames"; 3 | 4 | interface Props { 5 | message: AIMessage; 6 | } 7 | 8 | export const Message = ({ message }: Props) => { 9 | const { role, content } = message; 10 | 11 | return ( 12 |
18 | [The Message component's JSX (lines 18-36) was stripped during text extraction. Surviving fragments show the sender label, {role === "user" ? "You" : "Assistant"}, and the message body, {content}.]
37 | ); 38 | }; 39 | -------------------------------------------------------------------------------- /apps/demo/next.config.js: -------------------------------------------------------------------------------- 1 | /** @type {import('next').NextConfig} */ 2 | const nextConfig = {}; 3 | 4 | module.exports = nextConfig; 5 | -------------------------------------------------------------------------------- /apps/demo/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "demo", 3 | "version": "0.1.0", 4 | "private": true, 5 | "scripts": { 6 | "dev": "dotenv -- next dev", 7 | "start": "next start", 8 | "format": "prettier --write ." 9 | }, 10 | "dependencies": { 11 | "@radix-ui/react-icons": "^1.3.0", 12 | "@radix-ui/themes": "^2.0.0-rc.3", 13 | "@types/node": "20.6.4", 14 | "@types/react": "18.2.22", 15 | "@types/react-dom": "18.2.7", 16 | "ai": "^2.2.13", 17 | "autoprefixer": "10.4.16", 18 | "classnames": "^2.3.2", 19 | "encoding": "^0.1.13", 20 | "next": "13.5.4", 21 | "openai": "^4.10.0", 22 | "react": "^18", 23 | "react-dom": "^18", 24 | "unillm": "*" 25 | }, 26 | "devDependencies": { 27 | "@types/node": "^20", 28 | "@types/react": "^18", 29 | "@types/react-dom": "^18", 30 | "autoprefixer": "^10", 31 | "eslint": "^8", 32 | "eslint-config-next": "13.5.4", 33 | "postcss": "^8", 34 | "tailwindcss": "^3", 35 | "typescript": "^5" 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /apps/demo/postcss.config.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | plugins: { 3 | tailwindcss: {}, 4 | autoprefixer: {}, 5 | }, 6 | }; 7 | -------------------------------------------------------------------------------- /apps/demo/public/next.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /apps/demo/public/vercel.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /apps/demo/tailwind.config.ts: -------------------------------------------------------------------------------- 1 | import type { Config } from "tailwindcss"; 2 | 3 | const config: Config = { 4 | content: [ 5 | "./pages/**/*.{js,ts,jsx,tsx,mdx}", 6 | "./components/**/*.{js,ts,jsx,tsx,mdx}", 7 | "./app/**/*.{js,ts,jsx,tsx,mdx}", 8 | ], 9 | theme: { 10 | extend: { 11 | backgroundImage: { 12 | "gradient-radial": "radial-gradient(var(--tw-gradient-stops))", 13 | "gradient-conic": 14 | "conic-gradient(from 180deg at 50% 50%, var(--tw-gradient-stops))", 15 | }, 16 | }, 17 | }, 18 | plugins: [], 19 | }; 20 | export default config; 21 | -------------------------------------------------------------------------------- /apps/demo/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "es5", 4 | "lib": ["dom", "dom.iterable", "esnext"], 5 | "allowJs": true, 6 | "skipLibCheck": true, 7 | "strict": true, 8 | "noEmit": true, 9 | "esModuleInterop": true, 10 | "module": "esnext", 11 | "moduleResolution": "bundler", 12 | "resolveJsonModule": true, 13 | "isolatedModules": true, 14 | "jsx": "preserve", 15 | "incremental": true, 16 | "plugins": [ 17 | { 18 | "name": "next" 19 | } 20 | ], 21 | "paths": { 22 | "@/*": ["./*"] 23 | } 24 | }, 25 | "include": ["next-env.d.ts", "**/*.ts", 
"**/*.tsx", ".next/types/**/*.ts"], 26 | "exclude": ["node_modules"] 27 | } 28 | -------------------------------------------------------------------------------- /apps/demo/utils/types.ts: -------------------------------------------------------------------------------- 1 | export const LLMs = { 2 | "openai/gpt-3.5-turbo": "openai/gpt-3.5-turbo", 3 | "openai/gpt-4": "openai/gpt-4", 4 | "anthropic/claude-2": "anthropic/claude-2", 5 | "azure/openai/gpt35turbo": "azure/openai/gpt35turbo", 6 | }; 7 | -------------------------------------------------------------------------------- /apps/docs/.github/screenshot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pezzolabs/UniLLM/b129da5c077000baeb2c66e3f3ceafdc9ae3f22a/apps/docs/.github/screenshot.png -------------------------------------------------------------------------------- /apps/docs/.gitignore: -------------------------------------------------------------------------------- 1 | .next 2 | node_modules 3 | -------------------------------------------------------------------------------- /apps/docs/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Pezzo, Inc. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /apps/docs/README.md: -------------------------------------------------------------------------------- 1 | # Nextra Docs Template 2 | 3 | This is a template for creating documentation with [Nextra](https://nextra.site). 4 | 5 | [**Live Demo →**](https://nextra-docs-template.vercel.app) 6 | 7 | [![](.github/screenshot.png)](https://nextra-docs-template.vercel.app) 8 | 9 | ## Quick Start 10 | 11 | Click the button to clone this repository and deploy it on Vercel: 12 | 13 | [![](https://vercel.com/button)](https://vercel.com/new/clone?s=https%3A%2F%2Fgithub.com%2Fshuding%2Fnextra-docs-template&showOptionalTeamCreation=false) 14 | 15 | ## Local Development 16 | 17 | First, run `pnpm i` to install the dependencies. 18 | 19 | Then, run `pnpm dev` to start the development server and visit localhost:3000. 20 | 21 | ## License 22 | 23 | This project is licensed under the MIT License. 
24 | -------------------------------------------------------------------------------- /apps/docs/components/DynamicCodeExample.tsx: -------------------------------------------------------------------------------- 1 | import { useEffect, useRef, useState } from "react"; 2 | import { Select } from "@radix-ui/themes"; 3 | import { models, providers } from "llm-repo"; 4 | import { providerToLogoMapping } from "llm-repo/logos"; 5 | import type { Model, ModelDefinition } from "llm-repo"; 6 | import Image from "next/image"; 7 | 8 | export const getSelectContent = (allowedProvider?) => { 9 | const sections: { [key: string]: { key: string, logo: any, models: { value: string, name: string }[] } } = {}; 10 | 11 | Object.entries(models).forEach((entry) => { 12 | const modelValue: Model = entry[0]; 13 | const modelDefinition: ModelDefinition = entry[1]; 14 | 15 | const provider = modelDefinition.provider as string; 16 | 17 | if (!sections[provider]) { 18 | const logo = providerToLogoMapping[modelDefinition.provider] 19 | sections[provider] = { 20 | key: provider, 21 | logo, 22 | models: [] 23 | } 24 | } 25 | 26 | sections[provider].models.push({ 27 | value: modelValue, 28 | name: modelDefinition.name 29 | }); 30 | }); 31 | 32 | const entries = Object.entries(sections); 33 | 34 | return entries 35 | .filter(([providerKey]) => allowedProvider ? providerKey === allowedProvider : true) 36 | .map(([providerKey, section], index) => { 37 | const { logo, models } = section; 38 | const { name: providerName } = providers[providerKey]; 39 | 40 | return ( 41 | <> 42 | 43 | 44 | 45 | 46 | {providerName} 47 | 48 | 49 | {models.map(({ name, value }) => ( 50 | 51 |
52 | 53 | {providerName} 54 | {name} 55 |
56 |
57 | ))} 58 |
59 | {!allowedProvider && index < entries.length - 1 && } 60 | 61 | ) 62 | }); 63 | } 64 | 65 | type Props = { 66 | children: React.ReactNode; 67 | defaultLLM: string; 68 | allowedProvider?: string; 69 | } 70 | 71 | export function DynamicCodeExample({ children, defaultLLM, allowedProvider }: Props) { 72 | const ref = useRef(); 73 | const setupRef = useRef(); 74 | const modelRef = useRef(); 75 | const [selectedLLM, setSelectedLLM] = useState(defaultLLM ?? "openai/gpt-3.5-turbo"); 76 | 77 | // Find the corresponding token from the DOM 78 | useEffect(() => { 79 | if (ref.current) { 80 | const code = [...ref.current.querySelectorAll("code span")]; 81 | 82 | const model = code.find( 83 | (el) => el.innerText === `"#MODEL#"`, 84 | ); 85 | modelRef.current = model; 86 | 87 | const setup = code.find( 88 | (el) => el.innerText === " #SETUP#", 89 | ); 90 | setupRef.current = setup; 91 | } 92 | }, []); 93 | 94 | useEffect(() => { 95 | if (setupRef.current && modelRef.current) { 96 | const model = models[selectedLLM]; 97 | 98 | modelRef.current.innerText = `"${model.modelTokenOverride ?? selectedLLM}"`; 99 | setupRef.current.innerText = model.setup; 100 | setupRef.current.style.color = "var(--shiki-token-comment)"; 101 | } 102 | }, [selectedLLM]); 103 | 104 | const handleSelectChange = (value: string) => { 105 | setSelectedLLM(value); 106 | } 107 | 108 | return ( 109 | <> 110 |
111 |
112 |
113 |
114 | 115 | 116 | 117 | {getSelectContent(allowedProvider)} 118 | 119 | 120 |
121 |
122 | 123 |
124 | {children} 125 |
126 |
127 | 128 | ); 129 | } 130 | -------------------------------------------------------------------------------- /apps/docs/components/counters.module.css: -------------------------------------------------------------------------------- 1 | .counter { 2 | border: 1px solid #ccc; 3 | border-radius: 5px; 4 | padding: 2px 6px; 5 | margin: 12px 0 0; 6 | } 7 | -------------------------------------------------------------------------------- /apps/docs/components/counters.tsx: -------------------------------------------------------------------------------- 1 | // Example from https://beta.reactjs.org/learn 2 | 3 | import { useState } from 'react' 4 | import styles from './counters.module.css' 5 | 6 | function MyButton() { 7 | const [count, setCount] = useState(0) 8 | 9 | function handleClick() { 10 | setCount(count + 1) 11 | } 12 | 13 | return ( 14 |
15 | 18 |
19 | ) 20 | } 21 | 22 | export default function MyApp() { 23 | return 24 | } 25 | -------------------------------------------------------------------------------- /apps/docs/next-env.d.ts: -------------------------------------------------------------------------------- 1 | /// 2 | /// 3 | 4 | // NOTE: This file should not be edited 5 | // see https://nextjs.org/docs/basic-features/typescript for more information. 6 | -------------------------------------------------------------------------------- /apps/docs/next.config.js: -------------------------------------------------------------------------------- 1 | const withNextra = require('nextra')({ 2 | theme: 'nextra-theme-docs', 3 | themeConfig: './theme.config.tsx', 4 | }) 5 | 6 | /** @type {import("next").NextConfig} */ 7 | module.exports = withNextra(({ 8 | transpilePackages: ["llm-repo"] 9 | })) -------------------------------------------------------------------------------- /apps/docs/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "docs", 3 | "version": "0.0.1", 4 | "description": "UniLLM Documentation", 5 | "scripts": { 6 | "dev": "next dev", 7 | "build": "next build", 8 | "start": "next start" 9 | }, 10 | "repository": { 11 | "type": "git", 12 | "url": "git+https://github.com/pezzolabs/unillm.git" 13 | }, 14 | "author": "Ariel Weinberger ", 15 | "license": "MIT", 16 | "bugs": { 17 | "url": "https://github.com/pezzolabs/unillm/issues" 18 | }, 19 | "homepage": "https://github.com/pezzolabs/unillm#readme", 20 | "dependencies": { 21 | "@radix-ui/themes": "^2.0.0", 22 | "llm-repo": "*", 23 | "next": "^13.0.6", 24 | "nextra": "latest", 25 | "nextra-theme-docs": "latest", 26 | "react": "^18.2.0", 27 | "react-dom": "^18.2.0" 28 | }, 29 | "devDependencies": { 30 | "@types/node": "18.11.10", 31 | "autoprefixer": "^10.4.16", 32 | "postcss": "^8.4.31", 33 | "postcss-nesting": "^12.0.1", 34 | "tailwindcss": "^3.3.3", 35 | "typescript": "^4.9.3" 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /apps/docs/pages/_app.tsx: -------------------------------------------------------------------------------- 1 | import { Theme } from "@radix-ui/themes"; 2 | import "../styles/globals.css"; 3 | import "@radix-ui/themes/styles.css"; 4 | 5 | export default function Nextra({ Component, pageProps }) { 6 | return ( 7 | 8 | 9 | 10 | ); 11 | } -------------------------------------------------------------------------------- /apps/docs/pages/_meta.json: -------------------------------------------------------------------------------- 1 | { 2 | "index": { 3 | "title": "Getting Started", 4 | "icon": "fa fa-home" 5 | }, 6 | "providers-and-models": { 7 | "title": "Providers & Models", 8 | "icon": "fa fa-home" 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /apps/docs/pages/index.mdx: -------------------------------------------------------------------------------- 1 | import { Card, Cards, Steps } from 'nextra/components' 2 | import { DynamicCodeExample } from '../components/DynamicCodeExample' 3 | 4 | # What is UniLLM? 5 | 6 | UniLLM allows you to call any LLM using the OpenAI API, with 100% type safety. 
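
Every call goes through the same OpenAI-style `createChatCompletion` API; only the model string changes. A minimal sketch (model identifiers as listed in the table below):

```ts copy
import { UniLLM } from 'unillm';

const unillm = new UniLLM();

// Identical request shape across providers; just swap the model string.
const gpt = await unillm.createChatCompletion("openai/gpt-3.5-turbo", {
  messages: [{ role: "user", content: "Hello!" }],
});
const claude = await unillm.createChatCompletion("anthropic/claude-2", {
  messages: [{ role: "user", content: "Hello!" }],
});
```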
7 | 8 | ## Benefits 9 | - ✨ Integrate with any provider and model using the OpenAI API 10 | - 💬 Consistent chatCompletion responses and logs across all models and providers 11 | - 💯 Type safety across all providers and models 12 | - 🔁 Seamlessly switch between LLMs without rewriting your codebase 13 | - ✅ If you write tests for your service, you only need to test it once 14 | - 🔜 (Coming Soon) Request caching and rate limiting 15 | - 🔜 (Coming Soon) Cost monitoring and alerting 16 | 17 | ## Getting Started 18 | 19 | 20 | 21 | ### Install UniLLM 22 | ```bash copy 23 | npm i unillm 24 | ``` 25 | 26 | ### Make an API call 27 | 28 | ```ts copy 29 | import { UniLLM } from 'unillm'; 30 | 31 | /* 32 | #SETUP# 33 | */ 34 | 35 | // Setup UniLLM 36 | const unillm = new UniLLM(); 37 | 38 | // Use any LLM provider and model 39 | const response = await unillm.createChatCompletion("#MODEL#", { 40 | stream: true, 41 | temperature: 0, 42 | messages: [ 43 | { 44 | role: "user", 45 | content: "How are you?" 46 | } 47 | ], 48 | }) 49 | ``` 50 | 51 | 52 | 53 | 54 | 55 | ## Supported Providers & Models 56 | 57 | Below is a list of models that are currently supported by UniLLM. If you would like to see a model added to the roadmap, [please open an issue on GitHub](https://github.com/pezzolabs/UniLLM). 58 | 59 | | **Provider/LLM** | **Chat Completions** | **Streaming** | 60 | |----------------------------|-----------------------|----------------| 61 | | OpenAI `gpt-3.5-turbo` | ✅ | ✅ | 62 | | OpenAI `gpt-4` | ✅ | ✅ | 63 | | Anthropic `claude-2` | ✅ | ✅ | 64 | | Anthropic `claude-instant-1` | ✅ | ✅ | 65 | | Azure OpenAI (all models) | ✅ | ✅ | 66 | | Llama 2 | 🚧 Coming Soon | 🚧 Coming Soon | 67 | | Falcon | 🚧 Coming Soon | 🚧 Coming Soon | 68 | | Mistral | 🚧 Coming Soon | 🚧 Coming Soon | 69 | | AWS Bedrock | 🚧 Coming Soon | 🚧 Coming Soon | 70 | | AI21 | 🚧 Coming Soon | 🚧 Coming Soon | 71 | | Huggingface | 🚧 Coming Soon | 🚧 Coming Soon | -------------------------------------------------------------------------------- /apps/docs/pages/providers-and-models/_meta.json: -------------------------------------------------------------------------------- 1 | { 2 | "openai": { 3 | "title": "OpenAI" 4 | }, 5 | "anthropic": { 6 | "title": "Anthropic" 7 | }, 8 | "azure-openai": { 9 | "title": "Azure OpenAI" 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /apps/docs/pages/providers-and-models/anthropic.mdx: -------------------------------------------------------------------------------- 1 | import { DynamicCodeExample } from "../../components/DynamicCodeExample"; 2 | 3 | # Anthropic 4 | 5 | ## Before you start 6 | 7 | 1. Obtain an Anthropic API key [here](https://console.anthropic.com/account/keys). 8 | 2. Make sure the `ANTHROPIC_API_KEY` environment variable is set. 9 | 10 | ## Usage 11 | 12 | 13 | ```ts copy 14 | import { UniLLM } from 'unillm'; 15 | 16 | /* 17 | #SETUP# 18 | */ 19 | 20 | // Setup UniLLM 21 | const unillm = new UniLLM(); 22 | 23 | // Use any LLM provider and model 24 | const response = await unillm.createChatCompletion("#MODEL#", { 25 | temperature: 0, 26 | messages: [ 27 | { 28 | role: "user", 29 | content: "How are you?" 
30 | } 31 | ] 32 | }) 33 | ``` 34 | -------------------------------------------------------------------------------- /apps/docs/pages/providers-and-models/azure-openai.mdx: -------------------------------------------------------------------------------- 1 | import { DynamicCodeExample } from "../../components/DynamicCodeExample"; 2 | 3 | # Azure OpenAI 4 | 5 | ## Before you start 6 | 7 | 1. Head over to [Azure OpenAI](https://portal.azure.com/#view/Microsoft_Azure_ProjectOxford/CognitiveServicesHub/~/OpenAI) and create an instance if you don't have one. 8 | 2. Create a new *model deployment* on Azure AI and configure it as you wish. 9 | 3. Set the following environment variables: 10 | - `AZURE_OPENAI_ENDPOINT` - your Azure OpenAI endpoint 11 | - `AZURE_OPENAI_API_KEY` - your Azure OpenAI API key 12 | 13 | ## Usage 14 | 15 | 16 | ```ts copy 17 | import { UniLLM } from 'unillm'; 18 | 19 | /* 20 | #SETUP# 21 | */ 22 | 23 | // Setup UniLLM 24 | const unillm = new UniLLM(); 25 | 26 | // Use any LLM provider and model 27 | const response = await unillm.createChatCompletion("#MODEL#", { 28 | temperature: 0, 29 | messages: [ 30 | { 31 | role: "user", 32 | content: "How are you?" 33 | } 34 | ] 35 | }) 36 | ``` 37 | -------------------------------------------------------------------------------- /apps/docs/pages/providers-and-models/openai.mdx: -------------------------------------------------------------------------------- 1 | import { DynamicCodeExample } from "../../components/DynamicCodeExample"; 2 | 3 | # OpenAI 4 | 5 | ## Before you start 6 | 7 | 1. Obtain an OpenAI API key [here](https://platform.openai.com/account/api-keys). 8 | 2. Make sure the `OPENAI_API_KEY` environment variable is set. 9 | 10 | ## Usage 11 | 12 | 13 | ```ts copy 14 | import { UniLLM } from 'unillm'; 15 | 16 | /* 17 | #SETUP# 18 | */ 19 | 20 | // Setup UniLLM 21 | const unillm = new UniLLM(); 22 | 23 | // Use any LLM provider and model 24 | const response = await unillm.createChatCompletion("#MODEL#", { 25 | temperature: 0, 26 | messages: [ 27 | { 28 | role: "user", 29 | content: "How are you?" 
30 | } 31 | ] 32 | }) 33 | ``` 34 | -------------------------------------------------------------------------------- /apps/docs/postcss.config.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | plugins: { 3 | "tailwindcss/nesting": {}, 4 | tailwindcss: {}, 5 | autoprefixer: {}, 6 | }, 7 | } 8 | -------------------------------------------------------------------------------- /apps/docs/public/favicon/android-chrome-192x192.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pezzolabs/UniLLM/b129da5c077000baeb2c66e3f3ceafdc9ae3f22a/apps/docs/public/favicon/android-chrome-192x192.png -------------------------------------------------------------------------------- /apps/docs/public/favicon/android-chrome-512x512.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pezzolabs/UniLLM/b129da5c077000baeb2c66e3f3ceafdc9ae3f22a/apps/docs/public/favicon/android-chrome-512x512.png -------------------------------------------------------------------------------- /apps/docs/public/favicon/apple-touch-icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pezzolabs/UniLLM/b129da5c077000baeb2c66e3f3ceafdc9ae3f22a/apps/docs/public/favicon/apple-touch-icon.png -------------------------------------------------------------------------------- /apps/docs/public/favicon/favicon-16x16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pezzolabs/UniLLM/b129da5c077000baeb2c66e3f3ceafdc9ae3f22a/apps/docs/public/favicon/favicon-16x16.png -------------------------------------------------------------------------------- /apps/docs/public/favicon/favicon-32x32.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pezzolabs/UniLLM/b129da5c077000baeb2c66e3f3ceafdc9ae3f22a/apps/docs/public/favicon/favicon-32x32.png -------------------------------------------------------------------------------- /apps/docs/public/favicon/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pezzolabs/UniLLM/b129da5c077000baeb2c66e3f3ceafdc9ae3f22a/apps/docs/public/favicon/favicon.ico -------------------------------------------------------------------------------- /apps/docs/public/favicon/site.webmanifest: -------------------------------------------------------------------------------- 1 | {"name":"","short_name":"","icons":[{"src":"/android-chrome-192x192.png","sizes":"192x192","type":"image/png"},{"src":"/android-chrome-512x512.png","sizes":"512x512","type":"image/png"}],"theme_color":"#ffffff","background_color":"#ffffff","display":"standalone"} -------------------------------------------------------------------------------- /apps/docs/styles/globals.css: -------------------------------------------------------------------------------- 1 | @tailwind base; 2 | @tailwind components; 3 | @tailwind utilities; 4 | 5 | .nextra-code-block { 6 | > pre { 7 | @apply rounded-md; 8 | @apply bg-stone-800 !important; 9 | @apply border; 10 | @apply border-stone-700; 11 | } 12 | } 13 | 14 | .dynamic-code-example { 15 | .nextra-code-block { 16 | @apply bg-stone-900; 17 | 18 | pre { 19 | @apply rounded-none; 20 | @apply mb-0; 21 | } 22 | } 23 | } 24 | 25 | article { 26 | h1, h2, h3, h4, h5, h6 
{ 27 | @apply border-none; 28 | } 29 | } -------------------------------------------------------------------------------- /apps/docs/tailwind.config.js: -------------------------------------------------------------------------------- 1 | /** @type {import('tailwindcss').Config} */ 2 | module.exports = { 3 | content: [ 4 | './pages/**/*.{js,jsx,ts,tsx,md,mdx}', 5 | './components/**/*.{js,jsx,ts,tsx,md,mdx}', 6 | ], 7 | theme: { 8 | extend: {} 9 | }, 10 | plugins: [] 11 | } -------------------------------------------------------------------------------- /apps/docs/theme.config.tsx: -------------------------------------------------------------------------------- 1 | import React from 'react' 2 | import { DocsThemeConfig } from 'nextra-theme-docs' 3 | import Image from "next/image" 4 | import Logo from "./assets/logo.svg" 5 | 6 | const config: DocsThemeConfig = { 7 | logo: UniLLM Logo, 8 | project: { 9 | link: 'https://github.com/pezzolabs/unillm', 10 | }, 11 | chat: { 12 | link: 'https://discord.gg/XcEVPePwn2', 13 | }, 14 | docsRepositoryBase: 'https://github.com/pezzolabs/unillm/tree/main/apps/docs', 15 | footer: { 16 | text: 'UniLLM', 17 | }, 18 | darkMode: true, 19 | nextThemes: { 20 | defaultTheme: "dark", 21 | }, 22 | themeSwitch: { 23 | component: false, 24 | }, 25 | primaryHue: 320, 26 | primarySaturation: 70, 27 | useNextSeoProps() { 28 | return { 29 | titleTemplate: `%s - UniLLM`, 30 | } 31 | }, 32 | head: ( 33 | <> 34 | 35 | 36 | 37 | 38 | 39 | ) 40 | } 41 | 42 | export default config 43 | -------------------------------------------------------------------------------- /apps/docs/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "tsconfig/base", 3 | "compilerOptions": { 4 | "target": "es5", 5 | "lib": ["dom", "dom.iterable", "esnext", "es2017"], 6 | "allowJs": true, 7 | "skipLibCheck": true, 8 | "strict": false, 9 | "forceConsistentCasingInFileNames": true, 10 | "noEmit": true, 11 | "incremental": true, 12 | "esModuleInterop": true, 13 | "module": "esnext", 14 | "moduleResolution": "node", 15 | "resolveJsonModule": true, 16 | "isolatedModules": true, 17 | "jsx": "preserve" 18 | }, 19 | "include": ["next-env.d.ts", "**/*.ts", "**/*.tsx"], 20 | "exclude": ["node_modules"] 21 | } -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "private": true, 3 | "scripts": { 4 | "build": "turbo run build", 5 | "dev": "dotenv -- turbo run dev", 6 | "test": "dotenv -- turbo run test", 7 | "lint": "turbo run lint", 8 | "format": "turbo run format" 9 | }, 10 | "devDependencies": { 11 | "@rollup/plugin-wasm": "^6.2.2", 12 | "@types/json-stable-stringify": "^1.0.34", 13 | "dotenv-cli": "^7.3.0", 14 | "eslint": "^8.48.0", 15 | "prettier": "^3.0.3", 16 | "tsconfig": "*", 17 | "turbo": "latest" 18 | }, 19 | "name": "unillm", 20 | "packageManager": "npm@8.19.2", 21 | "workspaces": [ 22 | "apps/*", 23 | "packages/*" 24 | ], 25 | "dependencies": { 26 | "json-stable-stringify": "^1.0.2" 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /packages/eslint-config-custom/README.md: -------------------------------------------------------------------------------- 1 | # `@turbo/eslint-config` 2 | 3 | Collection of internal eslint configurations. 
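All three presets in this package (`library.js`, `next.js`, and `react-internal.js`) extend the Vercel Engineering Style Guide; see the individual files below for the environment each one targets.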
4 | -------------------------------------------------------------------------------- /packages/eslint-config-custom/library.js: -------------------------------------------------------------------------------- 1 | const { resolve } = require("node:path"); 2 | 3 | const project = resolve(process.cwd(), "tsconfig.json"); 4 | 5 | /* 6 | * This is a custom ESLint configuration for use with 7 | * typescript packages. 8 | * 9 | * This config extends the Vercel Engineering Style Guide. 10 | * For more information, see https://github.com/vercel/style-guide 11 | * 12 | */ 13 | 14 | module.exports = { 15 | extends: [ 16 | "@vercel/style-guide/eslint/node", 17 | "@vercel/style-guide/eslint/typescript", 18 | ].map(require.resolve), 19 | parserOptions: { 20 | project, 21 | }, 22 | globals: { 23 | React: true, 24 | JSX: true, 25 | }, 26 | settings: { 27 | "import/resolver": { 28 | typescript: { 29 | project, 30 | }, 31 | }, 32 | }, 33 | ignorePatterns: ["node_modules/", "dist/"], 34 | }; 35 | -------------------------------------------------------------------------------- /packages/eslint-config-custom/next.js: -------------------------------------------------------------------------------- 1 | const { resolve } = require("node:path"); 2 | 3 | const project = resolve(process.cwd(), "tsconfig.json"); 4 | 5 | /* 6 | * This is a custom ESLint configuration for use with 7 | * Next.js apps. 8 | * 9 | * This config extends the Vercel Engineering Style Guide. 10 | * For more information, see https://github.com/vercel/style-guide 11 | * 12 | */ 13 | 14 | module.exports = { 15 | extends: [ 16 | "@vercel/style-guide/eslint/node", 17 | "@vercel/style-guide/eslint/browser", 18 | "@vercel/style-guide/eslint/typescript", 19 | "@vercel/style-guide/eslint/react", 20 | "@vercel/style-guide/eslint/next", 21 | "eslint-config-turbo", 22 | ].map(require.resolve), 23 | parserOptions: { 24 | project, 25 | }, 26 | globals: { 27 | React: true, 28 | JSX: true, 29 | }, 30 | settings: { 31 | "import/resolver": { 32 | typescript: { 33 | project, 34 | }, 35 | }, 36 | }, 37 | ignorePatterns: ["node_modules/", "dist/"], 38 | // add rules configurations here 39 | rules: { 40 | "import/no-default-export": "off", 41 | }, 42 | }; 43 | -------------------------------------------------------------------------------- /packages/eslint-config-custom/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "eslint-config-custom", 3 | "license": "MIT", 4 | "version": "0.0.0", 5 | "private": true, 6 | "devDependencies": { 7 | "@vercel/style-guide": "^5.0.0", 8 | "eslint-config-turbo": "^1.10.12" 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /packages/eslint-config-custom/react-internal.js: -------------------------------------------------------------------------------- 1 | const { resolve } = require("node:path"); 2 | 3 | const project = resolve(process.cwd(), "tsconfig.json"); 4 | 5 | /* 6 | * This is a custom ESLint configuration for use with 7 | * internal (bundled by their consumer) libraries 8 | * that utilize React. 9 | * 10 | * This config extends the Vercel Engineering Style Guide. 
11 | * For more information, see https://github.com/vercel/style-guide 12 | * 13 | */ 14 | 15 | module.exports = { 16 | extends: [ 17 | "@vercel/style-guide/eslint/browser", 18 | "@vercel/style-guide/eslint/typescript", 19 | "@vercel/style-guide/eslint/react", 20 | ].map(require.resolve), 21 | parserOptions: { 22 | project, 23 | }, 24 | globals: { 25 | JSX: true, 26 | }, 27 | settings: { 28 | "import/resolver": { 29 | typescript: { 30 | project, 31 | }, 32 | }, 33 | }, 34 | ignorePatterns: ["node_modules/", "dist/", ".eslintrc.js"], 35 | 36 | rules: { 37 | // add specific rules configurations here 38 | }, 39 | }; 40 | -------------------------------------------------------------------------------- /packages/llm-repo/images/anthropic.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pezzolabs/UniLLM/b129da5c077000baeb2c66e3f3ceafdc9ae3f22a/packages/llm-repo/images/anthropic.png -------------------------------------------------------------------------------- /packages/llm-repo/images/azure.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pezzolabs/UniLLM/b129da5c077000baeb2c66e3f3ceafdc9ae3f22a/packages/llm-repo/images/azure.png -------------------------------------------------------------------------------- /packages/llm-repo/images/openai.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pezzolabs/UniLLM/b129da5c077000baeb2c66e3f3ceafdc9ae3f22a/packages/llm-repo/images/openai.png -------------------------------------------------------------------------------- /packages/llm-repo/index.ts: -------------------------------------------------------------------------------- 1 | export type ProviderDefinition = { 2 | name: string; 3 | }; 4 | 5 | export const providers: { 6 | [key: string]: ProviderDefinition; 7 | } = { 8 | openai: { 9 | name: "OpenAI", 10 | }, 11 | anthropic: { 12 | name: "Anthropic", 13 | }, 14 | azure: { 15 | name: "Azure", 16 | }, 17 | }; 18 | 19 | export type Provider = keyof typeof providers; 20 | 21 | export type ModelDefinition = { 22 | provider: keyof typeof providers; 23 | name: string; 24 | setup: string; 25 | modelTokenOverride?: string; 26 | }; 27 | 28 | const openaiSetup = ` Make sure the following environment variables are set: 29 | OPENAI_API_KEY - your OpenAI API key 30 | `; 31 | 32 | const anthropicSetup = ` Make sure the following environment variables are set: 33 | ANTHROPIC_API_KEY - your Anthropic API key 34 | `; 35 | 36 | const azureSetup = ` Make sure the following environment variables are set: 37 | AZURE_OPENAI_ENDPOINT - your Azure OpenAI endpoint 38 | AZURE_OPENAI_API_KEY - your Azure OpenAI API key 39 | `; 40 | 41 | export const models: { 42 | [key: string]: ModelDefinition; 43 | } = { 44 | "openai/gpt-3.5-turbo": { 45 | provider: "openai", 46 | name: "GPT-3.5 Turbo", 47 | setup: openaiSetup, 48 | }, 49 | "openai/gpt-4": { 50 | provider: "openai", 51 | name: "GPT-4", 52 | setup: openaiSetup, 53 | }, 54 | "anthropic/claude-2": { 55 | provider: "anthropic", 56 | name: "Claude 2", 57 | setup: anthropicSetup, 58 | }, 59 | "anthropic/claude-1-instant": { 60 | provider: "anthropic", 61 | name: "Claude 1 Instant", 62 | setup: anthropicSetup, 63 | }, 64 | "azure/openai": { 65 | provider: "azure", 66 | name: "Azure OpenAI", 67 | setup: azureSetup, 68 | modelTokenOverride: "azure/openai/", 69 | }, 70 | }; 71 | 72 | export type Model = keyof typeof models; 
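// Model keys always take the form "<provider>/<model>", e.g.
// models["anthropic/claude-2"].name === "Claude 2". Azure deployments
// add a third segment ("azure/openai/<deployment>"), hence the
// "azure/openai/" modelTokenOverride on the azure entry.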
73 | -------------------------------------------------------------------------------- /packages/llm-repo/logos.ts: -------------------------------------------------------------------------------- 1 | import OpenAILogo from "./images/openai.png"; 2 | import AnthropicLogo from "./images/anthropic.png"; 3 | import AzureLogo from "./images/azure.png"; 4 | 5 | import { Provider } from "./index"; 6 | 7 | export const providerToLogoMapping: { 8 | [key in Provider]: any; 9 | } = { 10 | openai: OpenAILogo, 11 | anthropic: AnthropicLogo, 12 | azure: AzureLogo, 13 | }; 14 | 15 | export const getProviderLogo = (provider: Provider) => { 16 | return providerToLogoMapping[provider]; 17 | }; 18 | -------------------------------------------------------------------------------- /packages/llm-repo/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "llm-repo", 3 | "version": "0.0.0", 4 | "main": "./index.tsx", 5 | "types": "./index.tsx", 6 | "license": "MIT", 7 | "scripts": { 8 | "format": "prettier . --write" 9 | }, 10 | "devDependencies": { 11 | "eslint-config-custom": "*", 12 | "tsconfig": "*" 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /packages/llm-repo/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "tsconfig/base.json", 3 | "include": ["."], 4 | "exclude": ["dist", "build", "node_modules"] 5 | } 6 | -------------------------------------------------------------------------------- /packages/tsconfig/base.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://json.schemastore.org/tsconfig", 3 | "display": "Default", 4 | "compilerOptions": { 5 | "target": "ESNext", 6 | "composite": false, 7 | "declaration": true, 8 | "declarationMap": true, 9 | "esModuleInterop": true, 10 | "forceConsistentCasingInFileNames": true, 11 | "inlineSources": false, 12 | "isolatedModules": true, 13 | "moduleResolution": "node", 14 | "noUnusedLocals": false, 15 | "noUnusedParameters": false, 16 | "preserveWatchOutput": true, 17 | "skipLibCheck": true, 18 | "strict": true, 19 | "strictNullChecks": true 20 | }, 21 | "exclude": ["node_modules"] 22 | } 23 | -------------------------------------------------------------------------------- /packages/tsconfig/nextjs.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://json.schemastore.org/tsconfig", 3 | "display": "Next.js", 4 | "extends": "./base.json", 5 | "compilerOptions": { 6 | "plugins": [{ "name": "next" }], 7 | "allowJs": true, 8 | "declaration": false, 9 | "declarationMap": false, 10 | "incremental": true, 11 | "jsx": "preserve", 12 | "lib": ["dom", "dom.iterable", "esnext"], 13 | "module": "esnext", 14 | "noEmit": true, 15 | "resolveJsonModule": true, 16 | "strict": false, 17 | "target": "es5" 18 | }, 19 | "include": ["src", "next-env.d.ts"], 20 | "exclude": ["node_modules"] 21 | } 22 | -------------------------------------------------------------------------------- /packages/tsconfig/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "tsconfig", 3 | "version": "0.0.0", 4 | "private": true, 5 | "license": "MIT", 6 | "publishConfig": { 7 | "access": "public" 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /packages/tsconfig/react-library.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://json.schemastore.org/tsconfig", 3 | "display": "React Library", 4 | "extends": "./base.json", 5 | "compilerOptions": { 6 | "jsx": "react-jsx", 7 | "lib": ["ES2015", "DOM"], 8 | "module": "ESNext", 9 | "target": "es6" 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /packages/unillm-node/.eslintrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "parserOptions": { 3 | "ecmaVersion": "latest" 4 | }, 5 | 6 | "env": { 7 | "es6": true 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /packages/unillm-node/index.ts: -------------------------------------------------------------------------------- 1 | import { OpenAIProvider } from "./providers/openai"; 2 | import { 3 | ModelParamValues, 4 | UnifiedCreateChatCompletionNonStreamResult, 5 | UnifiedCreateChatCompletionParamsNonStreaming, 6 | UnifiedCreateChatCompletionParamsStreaming, 7 | UnifiedCreateChatCompletionStreamResult, 8 | } from "./utils/types"; 9 | import { BaseProvider } from "./providers/baseProvider"; 10 | import { ModelTypes } from "./utils/types"; 11 | import { AnthropicProvider } from "./providers/anthropic"; 12 | import { AzureOpenAIProvider } from "./providers/azure-openai"; 13 | 14 | const providers: { [k: string]: new () => BaseProvider } = { 15 | openai: OpenAIProvider, 16 | anthropic: AnthropicProvider, 17 | azure: AzureOpenAIProvider, 18 | }; 19 | 20 | export class UniLLM { 21 | // Non-streaming version 22 | public createChatCompletion( 23 | providerAndModel: keyof ModelParamValues, 24 | params: UnifiedCreateChatCompletionParamsNonStreaming, 25 | ): Promise; 26 | 27 | // Streaming version 28 | public createChatCompletion( 29 | providerAndModel: keyof ModelParamValues, 30 | params: UnifiedCreateChatCompletionParamsStreaming, 31 | ): Promise; 32 | 33 | public createChatCompletion( 34 | providerAndModel: keyof ModelParamValues, 35 | params: 36 | | UnifiedCreateChatCompletionParamsNonStreaming 37 | | UnifiedCreateChatCompletionParamsStreaming, 38 | ): 39 | | Promise 40 | | Promise { 41 | const [providerName, ...rest] = providerAndModel.split("/"); 42 | const model = rest.join("/"); 43 | const provider = providers[providerName]; 44 | 45 | if (!provider) { 46 | throw new Error(`Invalid provider provided - "${providerName}"`); 47 | } 48 | 49 | const providerInstance = new provider(); 50 | 51 | if (params.stream === true) { 52 | return providerInstance.createChatCompletionStreaming(model, { 53 | ...params, 54 | }); 55 | } else { 56 | return providerInstance.createChatCompletionNonStreaming(model, { 57 | ...params, 58 | }); 59 | } 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /packages/unillm-node/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "unillm", 3 | "version": "0.1.2", 4 | "main": "./index.ts", 5 | "type": "commonjs", 6 | "license": "MIT", 7 | "author": "Ariel Weinberger ", 8 | "repository": { 9 | "type": "git", 10 | "url": "https://github.com/pezzolabs/UniLLM.git" 11 | }, 12 | "homepage": "https://docs.unillm.ai", 13 | "scripts": { 14 | "lint": "eslint .", 15 | "test": "vitest --coverage", 16 | "format": "prettier --write .", 17 | "build": "rimraf dist && rollup -c", 18 | "build:watch": "rimraf dist && rollup -c --watch" 19 | }, 20 | "devDependencies": { 
21 | "@rollup/plugin-commonjs": "^25.0.7", 22 | "@rollup/plugin-json": "^6.0.1", 23 | "@rollup/plugin-node-resolve": "^15.2.3", 24 | "@turbo/gen": "^1.10.12", 25 | "@types/node": "^20.5.2", 26 | "@types/react": "^18.2.0", 27 | "@types/react-dom": "^18.2.0", 28 | "@vitest/coverage-v8": "^0.34.6", 29 | "eslint-config-custom": "*", 30 | "prettier": "^3.0.3", 31 | "react": "^18.2.0", 32 | "rimraf": "^5.0.5", 33 | "rollup": "^4.1.3", 34 | "rollup-plugin-copy": "^3.5.0", 35 | "rollup-plugin-generate-package-json": "^3.2.0", 36 | "rollup-plugin-typescript2": "^0.36.0", 37 | "typescript": "^4.5.2", 38 | "vitest": "^0.34.6", 39 | "zod": "^3.22.4" 40 | }, 41 | "dependencies": { 42 | "@anthropic-ai/sdk": "^0.6.2", 43 | "@azure/openai": "^1.0.0-beta.6", 44 | "@dqbd/tiktoken": "^1.0.7", 45 | "llm-repo": "*", 46 | "node-fetch": "^3.3.2", 47 | "openai": "^4.8.0" 48 | }, 49 | "keywords": [ 50 | "llm", 51 | "api", 52 | "openai", 53 | "azure", 54 | "anthropic", 55 | "cohere", 56 | "chatgpt", 57 | "promptengineering", 58 | "ai", 59 | "typescript", 60 | "javascript", 61 | "client", 62 | "sdk" 63 | ] 64 | } 65 | -------------------------------------------------------------------------------- /packages/unillm-node/providers/anthropic.ts: -------------------------------------------------------------------------------- 1 | import OpenAI from "openai"; 2 | import Anthropic from "@anthropic-ai/sdk"; 3 | import { APIError } from "@anthropic-ai/sdk/error"; 4 | import { Stream } from "@anthropic-ai/sdk/streaming"; 5 | import { 6 | UnifiedCreateChatCompletionNonStreamResult, 7 | UnifiedCreateChatCompletionStreamResult, 8 | UnifiedCreateChatCompletionParamsNonStreaming, 9 | UnifiedCreateChatCompletionParamsStreaming, 10 | Providers, 11 | ModelTypes, 12 | } from "../utils/types"; 13 | import { UnifiedErrorResponse } from "../utils/UnifiedErrorResponse"; 14 | 15 | import { Tiktoken } from "@dqbd/tiktoken"; 16 | import cl100k_base from "@dqbd/tiktoken/encoders/cl100k_base.json"; 17 | import { Readable } from "stream"; 18 | import { BaseProvider } from "./baseProvider"; 19 | 20 | export class AnthropicProvider extends BaseProvider { 21 | private anthropic = new Anthropic(); 22 | 23 | async createChatCompletionNonStreaming( 24 | model: ModelTypes[Providers.Anthropic], 25 | params: UnifiedCreateChatCompletionParamsNonStreaming, 26 | ): Promise { 27 | const { baseParams, prompt } = 28 | this.processUnifiedParamsToAnthropicFormat(params); 29 | 30 | let nativeResult: Anthropic.Completions.Completion; 31 | 32 | try { 33 | nativeResult = await this.anthropic.completions.create({ 34 | ...baseParams, 35 | model, 36 | stream: false, 37 | }); 38 | } catch (_error: unknown) { 39 | if (!(_error instanceof APIError)) { 40 | throw _error; 41 | } 42 | 43 | const error = _error as APIError; 44 | throw new UnifiedErrorResponse( 45 | { 46 | model, 47 | }, 48 | error.status, 49 | (error.error as any).error, 50 | error.message, 51 | error.headers, 52 | ); 53 | } 54 | 55 | const finishReasonMapping: { 56 | [ 57 | key: string 58 | ]: OpenAI.Chat.Completions.ChatCompletion.Choice["finish_reason"]; 59 | } = { 60 | max_tokens: "length", 61 | stop_sequence: "stop", 62 | }; 63 | 64 | const choices: OpenAI.Chat.Completions.ChatCompletion.Choice[] = [ 65 | { 66 | index: 0, 67 | message: { 68 | role: "assistant", 69 | content: this.trimLeadingSpaces(nativeResult.completion), 70 | }, 71 | finish_reason: finishReasonMapping[nativeResult.stop_reason], 72 | }, 73 | ]; 74 | 75 | const encoding = new Tiktoken( 76 | cl100k_base.bpe_ranks, 77 | 
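// (Anthropic's completions API does not return token usage, so usage is
// approximated below with OpenAI's cl100k_base encoding; for Claude
// models this is an estimate, not an exact count.)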
cl100k_base.special_tokens, 78 | cl100k_base.pat_str, 79 | ); 80 | 81 | const prompt_tokens = encoding.encode(prompt).length; 82 | const completion_tokens = encoding.encode(nativeResult.completion).length; 83 | const total_tokens = prompt_tokens + completion_tokens; 84 | 85 | encoding.free(); 86 | 87 | const result: OpenAI.Chat.Completions.ChatCompletion = { 88 | id: (nativeResult as any).log_id, 89 | choices, 90 | model: nativeResult.model, 91 | object: "chat.completion", 92 | usage: { prompt_tokens, completion_tokens, total_tokens }, 93 | created: Date.now(), 94 | }; 95 | 96 | return result; 97 | } 98 | 99 | async createChatCompletionStreaming( 100 | model: ModelTypes[Providers.Anthropic], 101 | params: UnifiedCreateChatCompletionParamsStreaming, 102 | ): Promise { 103 | const { baseParams } = this.processUnifiedParamsToAnthropicFormat(params); 104 | 105 | const originalStreamResponse = await this.anthropic.completions.create({ 106 | ...baseParams, 107 | model, 108 | stream: true, 109 | }); 110 | 111 | const stream = await this.parseStreamResponse(originalStreamResponse); 112 | return stream as unknown as UnifiedCreateChatCompletionStreamResult; 113 | } 114 | 115 | private processUnifiedParamsToAnthropicFormat( 116 | params: 117 | | UnifiedCreateChatCompletionParamsNonStreaming 118 | | UnifiedCreateChatCompletionParamsStreaming, 119 | ): { 120 | baseParams: Omit; 121 | prompt: string; 122 | } { 123 | let prompt = params.messages.reduce((acc, message) => { 124 | return `${acc}${ 125 | message.role === "user" ? "\n\nHuman" : "\n\nAssistant" 126 | }: ${message.content}`; 127 | }, ""); 128 | 129 | prompt += "\nAssistant:"; 130 | 131 | const encoding = new Tiktoken( 132 | cl100k_base.bpe_ranks, 133 | cl100k_base.special_tokens, 134 | cl100k_base.pat_str, 135 | ); 136 | encoding.free(); 137 | 138 | const baseParams = { 139 | max_tokens_to_sample: params.max_tokens ?? 300, 140 | temperature: params.temperature ?? undefined, 141 | top_p: params.top_p ?? 
undefined, 142 | prompt, 143 | }; 144 | 145 | return { baseParams, prompt }; 146 | } 147 | 148 | private trimLeadingSpaces(text: string): string { 149 | return text.replace(/^\s+/, ""); 150 | } 151 | 152 | private async parseStreamResponse( 153 | stream: Stream, 154 | ): Promise { 155 | const openaiStream = new Readable({ 156 | objectMode: true, 157 | read() {}, 158 | }); 159 | 160 | (async () => { 161 | for await (const chunk of stream) { 162 | const openaiChunk: OpenAI.Chat.Completions.ChatCompletionChunk = { 163 | id: "", 164 | object: "chat.completion.chunk", 165 | created: Date.now(), 166 | model: chunk.model, 167 | choices: [ 168 | { 169 | index: 0, 170 | delta: { content: chunk.completion, role: "assistant" }, 171 | finish_reason: this.getChunkFinishReason(chunk.stop_reason), 172 | }, 173 | ], 174 | }; 175 | openaiStream.push(openaiChunk); 176 | 177 | if (chunk.stop_reason === "stop_sequence") { 178 | openaiStream.push(null); 179 | break; 180 | } 181 | } 182 | })(); 183 | 184 | return openaiStream; 185 | } 186 | 187 | private getChunkFinishReason( 188 | anthropicStop: Anthropic.Completions.Completion["stop_reason"], 189 | ): OpenAI.Chat.Completions.ChatCompletionChunk["choices"][0]["finish_reason"] { 190 | if (anthropicStop === null) { 191 | return null; 192 | } 193 | 194 | if (anthropicStop === "stop_sequence") { 195 | return "stop"; 196 | } 197 | 198 | return null; 199 | } 200 | } 201 | -------------------------------------------------------------------------------- /packages/unillm-node/providers/azure-openai.ts: -------------------------------------------------------------------------------- 1 | import { 2 | OpenAIClient, 3 | AzureKeyCredential, 4 | GetChatCompletionsOptions, 5 | ChatCompletions, 6 | } from "@azure/openai"; 7 | 8 | import OpenAI from "openai"; 9 | import { 10 | UnifiedCreateChatCompletionNonStreamResult, 11 | UnifiedCreateChatCompletionStreamResult, 12 | UnifiedCreateChatCompletionParamsNonStreaming, 13 | UnifiedCreateChatCompletionParamsStreaming, 14 | Providers, 15 | ModelTypes, 16 | } from "../utils/types"; 17 | 18 | import { Readable } from "stream"; 19 | import { BaseProvider } from "./baseProvider"; 20 | import { UnifiedErrorResponse } from "../utils/UnifiedErrorResponse"; 21 | 22 | type AzureOpenAIError = { 23 | message?: string; 24 | type?: string; 25 | param?: string | null; 26 | code?: string | null; 27 | }; 28 | 29 | export class AzureOpenAIProvider extends BaseProvider { 30 | private client = new OpenAIClient( 31 | process.env.AZURE_OPENAI_ENDPOINT as string, 32 | new AzureKeyCredential(process.env.AZURE_OPENAI_API_KEY as string), 33 | ); 34 | 35 | async createChatCompletionNonStreaming( 36 | _model: ModelTypes[Providers.AzureOpenAI], 37 | params: UnifiedCreateChatCompletionParamsNonStreaming, 38 | ): Promise { 39 | const [, model] = _model.split("/"); 40 | const { baseParams } = this.processUnifiedParamsToAzureOpenAIFormat(params); 41 | 42 | let nativeResult: ChatCompletions; 43 | 44 | try { 45 | nativeResult = await this.client.getChatCompletions( 46 | model, 47 | params.messages, 48 | { 49 | ...baseParams, 50 | stream: false, 51 | }, 52 | ); 53 | } catch (_error: any) { 54 | const error = this.getUnifiedErrorFromAzureOpenAIError( 55 | _error as AzureOpenAIError, 56 | model, 57 | ); 58 | throw error; 59 | } 60 | 61 | const choices: OpenAI.Chat.Completions.ChatCompletion["choices"] = 62 | nativeResult.choices.map( 63 | (choice): OpenAI.Chat.Completions.ChatCompletion["choices"][0] => ({ 64 | index: choice.index, 65 | finish_reason: 66 | 
choice.finishReason as OpenAI.Chat.Completions.ChatCompletion["choices"][0]["finish_reason"], 67 | message: { 68 | role: choice.message! 69 | .role as OpenAI.Chat.Completions.ChatCompletion["choices"][0]["message"]["role"], 70 | content: choice.message!.content ?? null, 71 | function_call: choice.message!.functionCall 72 | ? choice.message!.functionCall 73 | : undefined, 74 | }, 75 | }), 76 | ); 77 | 78 | const result: OpenAI.Chat.Completions.ChatCompletion = { 79 | id: nativeResult.id, 80 | choices, 81 | model, 82 | object: "chat.completion", 83 | usage: { 84 | prompt_tokens: nativeResult.usage!.promptTokens, 85 | completion_tokens: nativeResult.usage!.completionTokens, 86 | total_tokens: nativeResult.usage!.totalTokens, 87 | }, 88 | created: Date.now(), 89 | }; 90 | 91 | return result; 92 | } 93 | 94 | async createChatCompletionStreaming( 95 | _model: ModelTypes[Providers.AzureOpenAI], 96 | params: UnifiedCreateChatCompletionParamsStreaming, 97 | ): Promise { 98 | const [, model] = _model.split("/"); 99 | const { baseParams } = this.processUnifiedParamsToAzureOpenAIFormat(params); 100 | 101 | const originalStreamResponse = this.client.listChatCompletions( 102 | model, 103 | params.messages, 104 | { 105 | ...baseParams, 106 | stream: true, 107 | }, 108 | ); 109 | const stream = await this.parseStreamResponse( 110 | model, 111 | originalStreamResponse, 112 | ); 113 | return stream as unknown as UnifiedCreateChatCompletionStreamResult; 114 | } 115 | 116 | private processUnifiedParamsToAzureOpenAIFormat( 117 | params: 118 | | UnifiedCreateChatCompletionParamsNonStreaming 119 | | UnifiedCreateChatCompletionParamsStreaming, 120 | ): { baseParams: GetChatCompletionsOptions } { 121 | const baseParams: GetChatCompletionsOptions = { 122 | maxTokens: params.max_tokens ?? undefined, 123 | temperature: params.temperature ?? undefined, 124 | functions: params.functions ?? undefined, 125 | functionCall: params.function_call ?? undefined, 126 | topP: params.top_p ?? undefined, 127 | user: params.user ?? undefined, 128 | presencePenalty: params.presence_penalty ?? undefined, 129 | frequencyPenalty: params.frequency_penalty ?? 
undefined, 130 | }; 131 | 132 | return { baseParams }; 133 | } 134 | 135 | private async parseStreamResponse( 136 | deployment: string, 137 | stream: AsyncIterable, 138 | ): Promise { 139 | const openaiStream = new Readable({ 140 | objectMode: true, 141 | read() {}, 142 | }); 143 | 144 | (async () => { 145 | for await (const chunk of stream) { 146 | const openaiChunk: OpenAI.Chat.Completions.ChatCompletionChunk = { 147 | id: chunk.id, 148 | created: Number(chunk.created), 149 | object: "chat.completion.chunk", 150 | model: deployment, 151 | choices: chunk.choices.map((choice) => ({ 152 | index: choice.index, 153 | finish_reason: 154 | choice.finishReason as OpenAI.Chat.Completions.ChatCompletionChunk.Choice["finish_reason"], 155 | delta: 156 | choice.delta as OpenAI.Chat.Completions.ChatCompletionChunk.Choice["delta"], 157 | })), 158 | }; 159 | 160 | openaiStream.push(openaiChunk); 161 | } 162 | })(); 163 | 164 | return openaiStream; 165 | } 166 | 167 | private getUnifiedErrorFromAzureOpenAIError( 168 | error: AzureOpenAIError, 169 | deployment: ModelTypes[Providers.AzureOpenAI], 170 | ): UnifiedErrorResponse { 171 | let status = 500; 172 | 173 | // Sometimes Azure returns a status code 174 | if (typeof error.code === "number") { 175 | status = error.code; 176 | } else if (typeof error.code === "string") { 177 | if (!isNaN(Number(error.code))) { 178 | status = Number(error.code); 179 | } else { 180 | // Sometimes it returns strings 181 | switch (error.code) { 182 | case "DeploymentNotFound": 183 | status = 404; 184 | break; 185 | 186 | // Need to handle more cases, but this isn't documented anywhere. 187 | } 188 | } 189 | } 190 | 191 | // And sometime it will return the native OpenAI error type, if endpoint and deployment exist 192 | if (error.type) { 193 | switch (error.type) { 194 | case "invalid_request_error": 195 | status = 400; 196 | break; 197 | 198 | // Need to handle more cases, but this isn't documented anywhere. 
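// In short: Azure may surface (1) a numeric `code`, used directly as
// the HTTP status, (2) a string code such as "DeploymentNotFound",
// mapped case by case, or (3) a native OpenAI-style `type` such as
// "invalid_request_error"; anything unrecognized falls back to 500.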
199 | } 200 | } 201 | 202 | return new UnifiedErrorResponse( 203 | { 204 | model: `azure/openai/${deployment}`, 205 | }, 206 | status, 207 | error, 208 | error.message, 209 | {}, 210 | ); 211 | } 212 | } 213 | -------------------------------------------------------------------------------- /packages/unillm-node/providers/baseProvider.ts: -------------------------------------------------------------------------------- 1 | import { 2 | UnifiedCreateChatCompletionParamsNonStreaming, 3 | UnifiedCreateChatCompletionParamsStreaming, 4 | UnifiedCreateChatCompletionNonStreamResult, 5 | UnifiedCreateChatCompletionStreamResult, 6 | ModelTypes, 7 | } from "../utils/types"; 8 | 9 | export abstract class BaseProvider { 10 | abstract createChatCompletionNonStreaming( 11 | model: ModelTypes[Provider], 12 | params: UnifiedCreateChatCompletionParamsNonStreaming, 13 | ): Promise; 14 | 15 | abstract createChatCompletionStreaming( 16 | model: ModelTypes[Provider], 17 | params: UnifiedCreateChatCompletionParamsStreaming, 18 | ): Promise; 19 | } 20 | -------------------------------------------------------------------------------- /packages/unillm-node/providers/openai.ts: -------------------------------------------------------------------------------- 1 | import OpenAI from "openai"; 2 | import { 3 | UnifiedCreateChatCompletionParamsStreaming, 4 | UnifiedCreateChatCompletionParamsNonStreaming, 5 | UnifiedCreateChatCompletionNonStreamResult, 6 | UnifiedCreateChatCompletionStreamResult, 7 | Providers, 8 | ModelTypes, 9 | } from "../utils/types"; 10 | import { BaseProvider } from "./baseProvider"; 11 | import { APIError } from "openai/error"; 12 | import { UnifiedErrorResponse } from "../utils/UnifiedErrorResponse"; 13 | 14 | export class OpenAIProvider extends BaseProvider { 15 | private openai = new OpenAI(); 16 | 17 | async createChatCompletionNonStreaming( 18 | model: ModelTypes[Providers.OpenAI], 19 | params: UnifiedCreateChatCompletionParamsNonStreaming, 20 | ): Promise { 21 | try { 22 | const response = await this.openai.chat.completions.create({ 23 | ...params, 24 | model, 25 | stream: false, 26 | }); 27 | 28 | return response; 29 | } catch (_error: unknown) { 30 | if (!(_error instanceof APIError)) { 31 | throw _error; 32 | } 33 | 34 | const error = _error as APIError; 35 | throw new UnifiedErrorResponse( 36 | { 37 | model, 38 | }, 39 | error.status, 40 | error.error, 41 | error.message, 42 | error.headers, 43 | ); 44 | } 45 | } 46 | 47 | async createChatCompletionStreaming( 48 | model: ModelTypes[Providers.OpenAI], 49 | params: UnifiedCreateChatCompletionParamsStreaming, 50 | ): Promise { 51 | const stream = await this.openai.chat.completions.create({ 52 | ...params, 53 | model, 54 | stream: true, 55 | }); 56 | return stream; 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /packages/unillm-node/rollup.config.js: -------------------------------------------------------------------------------- 1 | const path = require("path"); 2 | const generatePackageJson = require("rollup-plugin-generate-package-json"); 3 | const commonjs = require("@rollup/plugin-commonjs"); 4 | const typescript2 = require("rollup-plugin-typescript2"); 5 | const copy = require("rollup-plugin-copy"); 6 | const { nodeResolve } = require("@rollup/plugin-node-resolve"); 7 | const json = require("@rollup/plugin-json"); 8 | const { wasm } = require("@rollup/plugin-wasm"); 9 | 10 | const localPackageJson = require(path.resolve(__dirname, "package.json")); 11 | 12 | const { defineConfig } = 
require("rollup"); 13 | 14 | module.exports = defineConfig({ 15 | input: "./index.ts", 16 | output: [ 17 | { 18 | format: "cjs", 19 | dir: "./dist", 20 | name: "unillm-node", 21 | entryFileNames: "[name].cjs.js", 22 | chunkFileNames: "[name]-[hash].cjs.js", 23 | sourcemap: true, 24 | }, 25 | { 26 | format: "esm", 27 | dir: "./dist", 28 | name: "unillm-node", 29 | entryFileNames: "[name].esm.js", 30 | chunkFileNames: "[name]-[hash].esm.js", 31 | sourcemap: true, 32 | }, 33 | ], 34 | external: ["@dqbd/tiktoken"], 35 | plugins: [ 36 | nodeResolve(), 37 | typescript2({ 38 | tsconfig: path.resolve(__dirname, "tsconfig.json"), 39 | compilerOptions: { outDir: "./dist", sourceMap: true }, 40 | useTsconfigDeclarationDir: true, 41 | }), 42 | commonjs(), 43 | wasm(), 44 | json(), 45 | generatePackageJson({ 46 | baseContents: () => ({ 47 | ...localPackageJson, 48 | devDependencies: {}, 49 | main: "./index.cjs.js", 50 | module: "./index.esm.js", 51 | types: "./types/packages/unillm-node/index.d.ts", 52 | }), 53 | }), 54 | copy({ 55 | targets: [ 56 | { 57 | src: path.resolve(__dirname, "../../", "README.md"), 58 | dest: "./dist", 59 | }, 60 | { 61 | src: path.resolve(__dirname, "../../", "LICENSE"), 62 | dest: "./dist", 63 | }, 64 | ], 65 | }), 66 | ], 67 | }); 68 | -------------------------------------------------------------------------------- /packages/unillm-node/tests/anthropic.test.ts: -------------------------------------------------------------------------------- 1 | import { UniLLM } from ".."; 2 | import { describe, expect, it } from "vitest"; 3 | import * as utils from "./utils/validation.util"; 4 | import type { ChatCompletionChunk } from "openai/resources/chat"; 5 | import { testParams } from "./utils/test-data.util"; 6 | 7 | const unillm = new UniLLM(); 8 | 9 | describe("#createChatCompletion - Anthropic", () => { 10 | const model = "anthropic/claude-2"; 11 | 12 | describe("Non streaming", () => { 13 | it("Should return a valid chat completion response", async () => { 14 | const response = await unillm.createChatCompletion(model, { 15 | ...testParams, 16 | stream: false, 17 | }); 18 | expect(() => 19 | utils.validateOpenAIChatCompletionResponse(response), 20 | ).not.toThrow(); 21 | }); 22 | 23 | it("Should throw an error and return a unified error response", async () => { 24 | let errorOccurred = false; 25 | try { 26 | await unillm.createChatCompletion(model, { 27 | ...testParams, 28 | stream: false, 29 | messages: [], 30 | }); 31 | } catch (error) { 32 | errorOccurred = true; 33 | expect(() => 34 | utils.validateOpenAIChatCompletionErrorResponse(error), 35 | ).not.toThrow(); 36 | } 37 | expect(errorOccurred).toBeTruthy(); 38 | }); 39 | }); 40 | 41 | describe("Streaming", () => { 42 | it("Should return a valid iterable chat completion stream", async () => { 43 | const response = await unillm.createChatCompletion(model, { 44 | ...testParams, 45 | stream: true, 46 | }); 47 | 48 | let testChunk: ChatCompletionChunk; 49 | 50 | for await (const chunk of response) { 51 | testChunk = chunk; 52 | break; 53 | } 54 | 55 | expect(() => 56 | utils.validateOpenAIChatCompletionChunk(testChunk), 57 | ).not.toThrow(); 58 | }); 59 | }); 60 | }); 61 | -------------------------------------------------------------------------------- /packages/unillm-node/tests/azure-openai.test.ts: -------------------------------------------------------------------------------- 1 | import { UniLLM } from ".."; 2 | import { describe, expect, it } from "vitest"; 3 | import * as utils from "./utils/validation.util"; 4 | import 
type { ChatCompletionChunk } from "openai/resources/chat"; 5 | import { testFunctions, testParams } from "./utils/test-data.util"; 6 | 7 | const deployment = process.env.AZURE_OPENAI_DEPLOYMENT; 8 | const unillm = new UniLLM(); 9 | 10 | describe("#createChatCompletion - Azure OpenAI", () => { 11 | describe("Non streaming", () => { 12 | it("Should return a valid chat completion response", async () => { 13 | const response = await unillm.createChatCompletion( 14 | `azure/openai/${deployment}`, 15 | { 16 | ...testParams, 17 | stream: false, 18 | }, 19 | ); 20 | expect(() => 21 | utils.validateOpenAIChatCompletionResponse(response), 22 | ).not.toThrow(); 23 | }); 24 | 25 | it("Should return a valid function calling response", async () => { 26 | const response = await unillm.createChatCompletion( 27 | `azure/openai/${deployment}`, 28 | { 29 | ...testParams, 30 | stream: false, 31 | functions: testFunctions, 32 | }, 33 | ); 34 | expect(() => 35 | utils.validateOpenAIChatCompletionResponse(response), 36 | ).not.toThrow(); 37 | }); 38 | 39 | it("Should throw an error and return a unified error response", async () => { 40 | let errorOccurred = false; 41 | try { 42 | await unillm.createChatCompletion(`azure/openai/${deployment}`, { 43 | ...testParams, 44 | stream: false, 45 | messages: [], 46 | }); 47 | } catch (error) { 48 | errorOccurred = true; 49 | expect(() => 50 | utils.validateOpenAIChatCompletionErrorResponse(error), 51 | ).not.toThrow(); 52 | } 53 | expect(errorOccurred).toBeTruthy(); 54 | }); 55 | }); 56 | 57 | describe("Streaming", () => { 58 | it("Should return a valid iterable chat completion stream", async () => { 59 | const stream = await unillm.createChatCompletion( 60 | `azure/openai/${deployment}`, 61 | { 62 | ...testParams, 63 | stream: true, 64 | }, 65 | ); 66 | 67 | let testChunk: ChatCompletionChunk; 68 | 69 | for await (const chunk of stream) { 70 | testChunk = chunk; 71 | break; 72 | } 73 | 74 | expect(() => 75 | utils.validateOpenAIChatCompletionChunk(testChunk), 76 | ).not.toThrow(); 77 | }); 78 | }); 79 | }); 80 | -------------------------------------------------------------------------------- /packages/unillm-node/tests/openai.test.ts: -------------------------------------------------------------------------------- 1 | import { UniLLM } from ".."; 2 | import { describe, expect, it } from "vitest"; 3 | import * as utils from "./utils/validation.util"; 4 | import type { ChatCompletionChunk } from "openai/resources/chat"; 5 | import { testParams, testFunctions } from "./utils/test-data.util"; 6 | 7 | const unillm = new UniLLM(); 8 | 9 | describe("#createChatCompletion - OpenAI", () => { 10 | const model = "openai/gpt-3.5-turbo"; 11 | 12 | describe("Non streaming", () => { 13 | it("Should return a valid chat completion response", async () => { 14 | const response = await unillm.createChatCompletion(model, { 15 | ...testParams, 16 | stream: false, 17 | }); 18 | expect(() => 19 | utils.validateOpenAIChatCompletionResponse(response), 20 | ).not.toThrow(); 21 | }); 22 | 23 | it("Should return a valid function calling response", async () => { 24 | const response = await unillm.createChatCompletion(model, { 25 | ...testParams, 26 | stream: false, 27 | functions: testFunctions, 28 | }); 29 | expect(() => 30 | utils.validateOpenAIChatCompletionResponse(response), 31 | ).not.toThrow(); 32 | }); 33 | 34 | it("Should throw an error and return a unified error response", async () => { 35 | let errorOccurred = false; 36 | try { 37 | await unillm.createChatCompletion(model, { 38 | 
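// An empty `messages` array makes the provider reject the request,
// which exercises the UnifiedErrorResponse mapping validated below.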
...testParams, 39 | stream: false, 40 | messages: [], 41 | }); 42 | } catch (error) { 43 | errorOccurred = true; 44 | expect(() => 45 | utils.validateOpenAIChatCompletionErrorResponse(error), 46 | ).not.toThrow(); 47 | } 48 | expect(errorOccurred).toBeTruthy(); 49 | }); 50 | }); 51 | 52 | describe("Streaming", () => { 53 | it("Should return a valid iterable chat completion stream", async () => { 54 | const stream = await unillm.createChatCompletion(model, { 55 | ...testParams, 56 | stream: true, 57 | }); 58 | 59 | let testChunk: ChatCompletionChunk; 60 | 61 | for await (const chunk of stream) { 62 | testChunk = chunk; 63 | break; 64 | } 65 | 66 | expect(() => 67 | utils.validateOpenAIChatCompletionChunk(testChunk), 68 | ).not.toThrow(); 69 | }); 70 | }); 71 | }); 72 | -------------------------------------------------------------------------------- /packages/unillm-node/tests/utils/test-data.util.ts: -------------------------------------------------------------------------------- 1 | import { UnifiedCreateChatCompletionParamsBase } from "../../utils/types"; 2 | 3 | export const testParams: UnifiedCreateChatCompletionParamsBase = { 4 | temperature: 0, 5 | max_tokens: 50, 6 | messages: [ 7 | { 8 | role: "user", 9 | content: "How much is 2+2?", 10 | }, 11 | ], 12 | }; 13 | 14 | export const testFunctions: UnifiedCreateChatCompletionParamsBase["functions"] = 15 | [ 16 | { 17 | name: "add", 18 | description: "Adds two numbers", 19 | parameters: { 20 | type: "object", 21 | properties: { 22 | num1: { 23 | type: "number", 24 | description: "First number to add", 25 | }, 26 | num2: { 27 | type: "number", 28 | description: "Second number to add", 29 | }, 30 | }, 31 | }, 32 | }, 33 | ]; 34 | -------------------------------------------------------------------------------- /packages/unillm-node/tests/utils/validation.util.ts: -------------------------------------------------------------------------------- 1 | import { z } from "zod"; 2 | import type { 3 | ChatCompletion, 4 | ChatCompletionChunk, 5 | } from "openai/resources/chat"; 6 | 7 | import { UnifiedCreateChatCompletionNonStreamResult } from "../../utils/types"; 8 | import { UnifiedErrorResponse } from "../../utils/UnifiedErrorResponse"; 9 | 10 | /** 11 | * Validates an object against the OpenAI ChatCompletion response schema. 12 | */ 13 | export function validateOpenAIChatCompletionResponse( 14 | obj: UnifiedCreateChatCompletionNonStreamResult, 15 | ) { 16 | const schema: z.ZodType = z.strictObject({ 17 | id: z.string(), 18 | choices: z.array( 19 | z.strictObject({ 20 | finish_reason: z.enum([ 21 | "stop", 22 | "length", 23 | "function_call", 24 | "content_filter", 25 | ]), 26 | index: z.number(), 27 | // One of 28 | message: z.union([ 29 | // Content 30 | z.strictObject({ 31 | content: z.string(), 32 | role: z.enum(["user", "assistant", "system", "assistant"]), 33 | function_call: z.undefined(), 34 | }), 35 | // or, function call 36 | z.strictObject({ 37 | content: z.null(), 38 | role: z.enum(["assistant"]), 39 | function_call: z 40 | .strictObject({ 41 | arguments: z.string(), 42 | name: z.string(), 43 | }) 44 | .optional(), 45 | }), 46 | ]), 47 | }), 48 | ), 49 | created: z.number(), 50 | model: z.string(), 51 | object: z.string(), 52 | usage: z.strictObject({ 53 | prompt_tokens: z.number(), 54 | completion_tokens: z.number(), 55 | total_tokens: z.number(), 56 | }), 57 | }); 58 | 59 | schema.parse(obj); 60 | } 61 | 62 | /** 63 | * Validates an object against the OpenAI ChatCompletionChunk schema. 
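 * Accepts either a content delta or a function-call delta; parsing is
 * strict, so unexpected or missing fields throw a ZodError.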
64 | */ 65 | export function validateOpenAIChatCompletionChunk(obj: ChatCompletionChunk) { 66 | const schema: z.ZodType = z.strictObject({ 67 | id: z.string(), 68 | object: z.enum(["chat.completion.chunk"]), 69 | created: z.number(), 70 | model: z.string(), 71 | choices: z.array( 72 | z.strictObject({ 73 | index: z.number(), 74 | finish_reason: z 75 | .enum(["stop", "length", "function_call", "content_filter"]) 76 | .nullable(), 77 | // One of 78 | delta: z.union([ 79 | // Content 80 | z.strictObject({ 81 | content: z.string().nullable(), 82 | role: z.enum(["user", "assistant", "system", "function"]), 83 | }), 84 | // or, function call 85 | z.strictObject({ 86 | function_call: z.strictObject({ 87 | arguments: z.string(), 88 | name: z.string(), 89 | }), 90 | }), 91 | ]), 92 | }), 93 | ), 94 | }); 95 | 96 | schema.parse(obj); 97 | } 98 | 99 | export function validateOpenAIChatCompletionErrorResponse( 100 | error: UnifiedErrorResponse, 101 | ) { 102 | const schema = z.strictObject({ 103 | status: z.number(), 104 | headers: z.record(z.string()), 105 | param: z.string().nullable().optional(), 106 | code: z.string().nullable().optional(), 107 | type: z.string().optional(), 108 | error: z.strictObject({ 109 | message: z.string(), 110 | type: z.string().optional(), 111 | param: z.string().nullable().optional(), 112 | code: z.string().nullable().optional(), 113 | }), 114 | metadata: z.strictObject({ 115 | model: z.string(), 116 | }), 117 | }); 118 | 119 | schema.parse(error); 120 | } 121 | -------------------------------------------------------------------------------- /packages/unillm-node/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "tsconfig/base", 3 | "include": ["./index.ts"], 4 | "exclude": ["dist", "build", "node_modules", "**/.test.ts"], 5 | "compilerOptions": { 6 | "target": "es2015", 7 | "lib": ["es2017"], 8 | "outDir": "./dist", 9 | "moduleResolution": "node", 10 | "sourceMap": true, 11 | "declarationDir": "./dist/types", 12 | "resolveJsonModule": true 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /packages/unillm-node/turbo/generators/config.ts: -------------------------------------------------------------------------------- 1 | import type { PlopTypes } from "@turbo/gen"; 2 | 3 | // Learn more about Turborepo Generators at https://turbo.build/repo/docs/core-concepts/monorepos/code-generation 4 | 5 | // eslint-disable-next-line import/no-default-export -- Turbo generators require default export 6 | export default function generator(plop: PlopTypes.NodePlopAPI): void { 7 | // A simple generator to add a new React component to the internal UI library 8 | plop.setGenerator("react-component", { 9 | description: "Adds a new react component", 10 | prompts: [ 11 | { 12 | type: "input", 13 | name: "name", 14 | message: "What is the name of the component?", 15 | }, 16 | ], 17 | actions: [ 18 | { 19 | type: "add", 20 | path: "{{pascalCase name}}.tsx", 21 | templateFile: "templates/component.hbs", 22 | }, 23 | { 24 | type: "append", 25 | path: "index.tsx", 26 | pattern: /(?\/\/ component exports)/g, 27 | template: 'export * from "./{{pascalCase name}}";', 28 | }, 29 | ], 30 | }); 31 | } 32 | -------------------------------------------------------------------------------- /packages/unillm-node/turbo/generators/templates/component.hbs: -------------------------------------------------------------------------------- 1 | import * as React from "react"; interface Props { 
children?: React.ReactNode; } 2 | export const 3 | {{pascalCase name}} 4 | = ({ children }: Props) => { return ( 5 | <div> 6 | <h1>{{name}}</h1> 7 | {children} 8 | </div>
9 | ); }; -------------------------------------------------------------------------------- /packages/unillm-node/utils/UnifiedErrorResponse.ts: -------------------------------------------------------------------------------- 1 | import { APIError } from "openai/error"; 2 | import { Headers } from "openai/core"; 3 | import { Model } from "llm-repo"; 4 | 5 | export type UniLLMMetadata = { 6 | model: Model; 7 | }; 8 | 9 | export class UnifiedErrorResponse extends APIError { 10 | constructor( 11 | public metadata: UniLLMMetadata, 12 | status: number | undefined, 13 | error: Object | undefined, 14 | message: string | undefined, 15 | headers: Headers | undefined, 16 | ) { 17 | super(status, error, message, headers); 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /packages/unillm-node/utils/properties.ts: -------------------------------------------------------------------------------- 1 | export const properties = {}; 2 | -------------------------------------------------------------------------------- /packages/unillm-node/utils/types.ts: -------------------------------------------------------------------------------- 1 | import OpenAI from "openai"; 2 | import Anthropic from "@anthropic-ai/sdk"; 3 | 4 | type StaticParamValues = { 5 | "openai/gpt-3.5-turbo": "openai/gpt-3.5-turbo"; 6 | "openai/gpt-4": "openai/gpt-4"; 7 | "anthropic/claude-2": "anthropic/claude-2"; 8 | }; 9 | 10 | type AzureOpenAIDynamicValue = { 11 | [key in `azure/openai/${string}`]?: string; 12 | }; 13 | 14 | export type ModelParamValues = StaticParamValues & AzureOpenAIDynamicValue; 15 | 16 | export enum Providers { 17 | OpenAI, 18 | Anthropic, 19 | AzureOpenAI, 20 | } 21 | 22 | export type ModelTypes = { 23 | [Providers.OpenAI]: OpenAI.CompletionCreateParams["model"]; 24 | [Providers.Anthropic]: Anthropic.CompletionCreateParams["model"]; 25 | [Providers.AzureOpenAI]: string; 26 | }; 27 | 28 | export type UnifiedCreateChatCompletionParamsBase = Omit< 29 | OpenAI.Chat.Completions.ChatCompletionCreateParams, 30 | "model" 31 | >; 32 | export type UnifiedCreateChatCompletionParamsStreaming = Omit< 33 | OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming, 34 | "model" 35 | >; 36 | export type UnifiedCreateChatCompletionParamsNonStreaming = Omit< 37 | OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming, 38 | "model" 39 | >; 40 | 41 | export type UnifiedCreateChatCompletionNonStreamResult = 42 | OpenAI.Chat.Completions.ChatCompletion; 43 | export type UnifiedCreateChatCompletionStreamResult = 44 | AsyncIterable; 45 | -------------------------------------------------------------------------------- /packages/unillm-node/vite.config.ts: -------------------------------------------------------------------------------- 1 | import { defineConfig } from "vitest/config"; 2 | 3 | export default defineConfig({ 4 | test: { 5 | testTimeout: 30000, 6 | }, 7 | }); 8 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "tsconfig/base.json", 3 | "compilerOptions": { 4 | "resolveJsonModule": true 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /turbo.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://turbo.build/schema.json", 3 | "globalDependencies": ["**/.env.*local", "./.env.*local"], 4 | "pipeline": { 5 | "build": { 6 
| "cache": false, 7 | "dependsOn": ["^build"], 8 | "outputs": [".next/**", "!.next/cache/**"] 9 | }, 10 | "test": {}, 11 | "lint": {}, 12 | "format": { 13 | "cache": false 14 | }, 15 | "dev": { 16 | "cache": false, 17 | "persistent": false 18 | } 19 | } 20 | } 21 | --------------------------------------------------------------------------------