├── .nvmrc
├── .yarn
│   └── versions
│       └── 6e9a633f.yml
├── src
│   ├── global.d.ts
│   ├── buildInfo.ts
│   ├── langchain
│   │   ├── index.ts
│   │   ├── prompt-template.test.ts
│   │   ├── prompt-template.ts
│   │   ├── llm.ts
│   │   └── llm-chat.ts
│   ├── helpers
│   │   ├── __mocks__
│   │   │   └── config.ts
│   │   ├── types.ts
│   │   ├── config.ts
│   │   ├── config.test.ts
│   │   └── common.ts
│   ├── index.ts
│   ├── services
│   │   ├── BaseService.ts
│   │   ├── text
│   │   │   ├── TextTokenizationService.ts
│   │   │   ├── TextEmbeddingService.ts
│   │   │   ├── TextSentenceSimilarityService.ts
│   │   │   ├── TextChatService.ts
│   │   │   ├── TextGenerationService.ts
│   │   │   └── TextService.ts
│   │   ├── ModelService.ts
│   │   ├── RequestService.ts
│   │   ├── UserService.ts
│   │   ├── PromptService.ts
│   │   ├── TuneService.ts
│   │   ├── SystemPromptService.ts
│   │   └── FileService.ts
│   ├── utils
│   │   ├── types.ts
│   │   ├── concurrency.ts
│   │   ├── stream.ts
│   │   └── errors.ts
│   ├── api
│   │   ├── client.ts
│   │   └── streaming-client.ts
│   ├── errors.ts
│   ├── client.test.ts
│   ├── client.ts
│   └── schema.ts
├── .env
├── commitlint.config.cjs
├── .env.test
├── .whitesource
├── tsconfig.build.json
├── .husky
│   ├── pre-commit
│   └── commit-msg
├── .prettierignore
├── docs
│   └── img
│       └── rainbow.png
├── .prettierrc
├── scripts
│   ├── generate.sh
│   ├── examples.sh
│   └── generate.js
├── examples
│   ├── assets
│   │   └── tune_input.jsonl
│   ├── shared
│   │   └── constants.ts
│   ├── similarity.ts
│   ├── models.ts
│   ├── langchain
│   │   ├── prompt-templates.ts
│   │   ├── llm-chat.ts
│   │   └── llm.ts
│   ├── generate.ts
│   ├── history.ts
│   ├── chat.ts
│   ├── file.ts
│   └── tune.ts
├── .eslintignore
├── tests
│   ├── utils.ts
│   ├── mocks
│   │   ├── server.ts
│   │   └── handlers.ts
│   ├── setup.ts
│   ├── e2e
│   │   ├── services
│   │   │   └── FileService.test.ts
│   │   ├── langchain
│   │   │   ├── llm-chat.test.ts
│   │   │   └── llm.test.ts
│   │   └── client.test.ts
│   └── integration
│       ├── errors.test.ts
│       └── client.test.ts
├── .editorconfig
├── .yarnrc.yml
├── .gitignore
├── vite.config.ts
├── .github
│   └── workflows
│       ├── npm-publish.yml
│       └── node.js.yml
├── tsconfig.json
├── tsup.config.ts
├── LICENSE
├── .eslintrc.cjs
├── package.json
└── README.md

/.nvmrc:
--------------------------------------------------------------------------------
v18.18.2
--------------------------------------------------------------------------------
/.yarn/versions/6e9a633f.yml:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/src/global.d.ts:
--------------------------------------------------------------------------------
import 'jest-extended';
--------------------------------------------------------------------------------
/.env:
--------------------------------------------------------------------------------
GENAI_DEFAULT_ENDPOINT=https://bam-api.res.ibm.com
--------------------------------------------------------------------------------
/src/buildInfo.ts:
--------------------------------------------------------------------------------
export const version = process.env.VERSION;
--------------------------------------------------------------------------------
/commitlint.config.cjs:
--------------------------------------------------------------------------------
module.exports = { extends: ['@commitlint/config-conventional'] };
--------------------------------------------------------------------------------
/.env.test:
--------------------------------------------------------------------------------
ENDPOINT=https://bam-api.res.ibm.com
API_KEY=
RUN_LANGCHAIN_CHAT_TESTS=false
--------------------------------------------------------------------------------
/.whitesource:
--------------------------------------------------------------------------------
{
  "settingsInheritedFrom": "whitesource-config/whitesource-config@master"
}
--------------------------------------------------------------------------------
/tsconfig.build.json:
--------------------------------------------------------------------------------
{
  "extends": "./tsconfig.json",
  "exclude": ["examples"]
}
--------------------------------------------------------------------------------
/.husky/pre-commit:
--------------------------------------------------------------------------------
#!/usr/bin/env sh
. "$(dirname -- "$0")/_/husky.sh"

yarn lint-staged
--------------------------------------------------------------------------------
/.prettierignore:
--------------------------------------------------------------------------------
node_modules/
.vscode/
.yarn/
yarn.lock
.pnp.*

/src/api/schema.d.ts
--------------------------------------------------------------------------------
/docs/img/rainbow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/IBM/ibm-generative-ai-node-sdk/HEAD/docs/img/rainbow.png
--------------------------------------------------------------------------------
/.husky/commit-msg:
--------------------------------------------------------------------------------
#!/usr/bin/env sh
. "$(dirname -- "$0")/_/husky.sh"

yarn commitlint --edit $1
--------------------------------------------------------------------------------
/.prettierrc:
--------------------------------------------------------------------------------
{
  "arrowParens": "always",
  "singleQuote": true,
  "trailingComma": "all"
}
--------------------------------------------------------------------------------
/scripts/generate.sh:
--------------------------------------------------------------------------------
#!/bin/bash

source .env
SCHEMA_URL=$GENAI_DEFAULT_ENDPOINT/docs/json node ./scripts/generate.js
--------------------------------------------------------------------------------
/src/langchain/index.ts:
--------------------------------------------------------------------------------
export * from './llm.js';
export * from './llm-chat.js';
export * from './prompt-template.js';
--------------------------------------------------------------------------------
/examples/assets/tune_input.jsonl:
--------------------------------------------------------------------------------
{"input": "foo", "output": "bar"}
{"input": "foo", "output": "bar"}
{"input": "foo", "output": "bar"}
--------------------------------------------------------------------------------
/src/helpers/__mocks__/config.ts:
--------------------------------------------------------------------------------
export const lookupEndpoint = vi.fn(() => null);

export const lookupApiKey = vi.fn(() => null);
--------------------------------------------------------------------------------
/.eslintignore:
--------------------------------------------------------------------------------
/node_modules/**/*
/dist/**/*
.eslintrc.cjs
tsup.config.ts
/scripts

# generated files
/src/api/schema.d.ts
--------------------------------------------------------------------------------
/src/index.ts:
--------------------------------------------------------------------------------
export type * from './schema.js';

export * from './client.js';
export * from './errors.js';

export * from './buildInfo.js';
--------------------------------------------------------------------------------
/tests/utils.ts:
--------------------------------------------------------------------------------
export function describeIf(
  value: boolean,
): typeof describe | typeof describe.skip {
  return value ? describe : describe.skip;
}
--------------------------------------------------------------------------------
/.editorconfig:
--------------------------------------------------------------------------------
root = true

[*]
end_of_line = lf
insert_final_newline = true

[*.{js,json,yml}]
charset = utf-8
indent_style = space
indent_size = 2
--------------------------------------------------------------------------------
/.yarnrc.yml:
--------------------------------------------------------------------------------
nodeLinker: node-modules

plugins:
  - path: .yarn/plugins/@yarnpkg/plugin-version.cjs
    spec: "@yarnpkg/plugin-version"

yarnPath: .yarn/releases/yarn-3.5.0.cjs
--------------------------------------------------------------------------------
/tests/mocks/server.ts:
--------------------------------------------------------------------------------
import { SetupServer, setupServer } from 'msw/node';

import { handlers } from './handlers.js';

export const server: SetupServer = setupServer(...handlers);
--------------------------------------------------------------------------------
/examples/shared/constants.ts:
--------------------------------------------------------------------------------
export const MODEL = 'google/flan-ul2';
export const CHAT_MODEL = 'meta-llama/llama-3-70b-instruct';
export const EMBEDDING_MODEL = 'sentence-transformers/all-minilm-l6-v2';
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
.yarn/*
!.yarn/patches
!.yarn/plugins
!.yarn/releases
!.yarn/sdks
!.yarn/versions

.pnp.*
node_modules

/dist

# local env files
.env*.local

/coverage
--------------------------------------------------------------------------------
/scripts/examples.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -e

# Run all modules in examples/ directory
for file in examples/* examples/langchain/*; do
  if [ -f "$file" ]; then
    echo "Running example $file"
    npx ts-node -r dotenv-flow/config "$file" > /dev/null
  fi
done
--------------------------------------------------------------------------------
/tests/setup.ts:
--------------------------------------------------------------------------------
import { server } from './mocks/server.js';
import { resetStores } from './mocks/handlers.js';

beforeAll(() => server.listen({ onUnhandledRequest: 'bypass' }));
afterAll(() => server.close());
afterEach(() => {
  resetStores();
  server.resetHandlers();
});
--------------------------------------------------------------------------------
/src/services/BaseService.ts:
--------------------------------------------------------------------------------
import { ApiClient } from '../api/client.js';
import { SteamingApiClient } from '../api/streaming-client.js';

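/**
 * Common base for all API services; holds the shared REST and streaming
 * clients that concrete services use to issue versioned requests.
 */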
export abstract class BaseService {
  constructor(
    protected readonly _client: ApiClient,
    protected readonly _streamingClient: SteamingApiClient,
  ) {}
}
--------------------------------------------------------------------------------
/vite.config.ts:
--------------------------------------------------------------------------------
import { defineConfig, defaultExclude } from 'vitest/config';

export default defineConfig({
  test: {
    globals: true,
    setupFiles: ['dotenv-flow/config', 'jest-extended/all', './tests/setup.ts'],
    coverage: {
      exclude: ['**/__mocks__/**'],
    },
    exclude: [...defaultExclude],
    testTimeout: 120 * 1000,
  },
});
--------------------------------------------------------------------------------
/examples/similarity.ts:
--------------------------------------------------------------------------------
import { Client } from '../src/index.js';

import { EMBEDDING_MODEL } from './shared/constants.js';

const client = new Client({
  apiKey: process.env.GENAI_API_KEY,
});

{
  const output = await client.text.experimental.sentenceSimilarity.create({
    model_id: EMBEDDING_MODEL,
    source_sentence: 'Good morning',
    sentences: ['How are you?', 'Get lost!'],
  });
  console.log(output);
}
--------------------------------------------------------------------------------
/examples/models.ts:
--------------------------------------------------------------------------------
import { Client } from '../src/index.js';

import { MODEL } from './shared/constants.js';

const client = new Client({
  apiKey: process.env.GENAI_API_KEY,
});

{
  // List first hundred models
  const { results } = await client.model.list({ limit: 100, offset: 0 });
  console.log(results);
}

{
  // Retrieve info about a specific model
  const { result } = await client.model.retrieve({ id: MODEL });
  console.log(result);
}
--------------------------------------------------------------------------------
/src/utils/types.ts:
--------------------------------------------------------------------------------
type UnionKeys<T> = T extends T ? keyof T : never;
type StrictUnionHelper<T, TAll> = T extends any
  ? T & Partial<Record<Exclude<UnionKeys<TAll>, keyof T>, never>>
  : never;
export type StrictUnion<T> = StrictUnionHelper<T, T>;

export type FilterKeys<Obj, Matchers> = {
  [K in keyof Obj]: K extends Matchers ? Obj[K] : never;
}[keyof Obj];

export type OmitVersion<T> = Omit<T, 'version'>;

export type Empty = Record<string, never>;

export type Replace<T, U> = Omit<T, keyof U> & U;
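
// Illustration of StrictUnion (names are hypothetical): mixing members of the
// union becomes a type error because foreign keys are typed `never`, e.g.
//   type In = StrictUnion<{ a: string } | { b: number }>;
//   const ok: In = { a: 'x' };        // fine
//   const bad: In = { a: 'x', b: 1 }; // error: `b` would have to be `never`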
--------------------------------------------------------------------------------
/scripts/generate.js:
--------------------------------------------------------------------------------
import fs from 'node:fs';

import openapiTS from 'openapi-typescript';

// https://openapi-ts.pages.dev/6.x/node#example-blob-types
const contents = await openapiTS(
  new URL(process.env.SCHEMA_URL, import.meta.url),
  {
    transform(schemaObject, metadata) {
      if ('format' in schemaObject && schemaObject.format === 'binary') {
        return schemaObject.nullable ? 'Blob | null' : 'Blob';
      }
    },
  },
);

// (optional) write to file
fs.writeFileSync('./src/api/schema.d.ts', contents);
--------------------------------------------------------------------------------
/src/helpers/types.ts:
--------------------------------------------------------------------------------
export type RequiredPartial<T, K extends keyof T> = Required<Pick<T, K>> &
  Partial<Omit<T, K>>;

export type FlagOption<Key extends string, T extends boolean> = T extends true
  ? { [k in Key]: true }
  : { [k in Key]?: false };

export type FalsyValues = false | '' | 0 | null | undefined;
export type Truthy<T> = T extends FalsyValues ? never : T;

export type ErrorCallback = (err: unknown) => void;
export type DataCallback<T> = (err: unknown, result: T) => void;
export type Callback<T> = ErrorCallback | DataCallback<T>;
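
// Illustration (hypothetical aliases): RequiredPartial makes the picked keys
// mandatory and all others optional; FlagOption only allows the flag to be
// set when the boolean type parameter is `true`, e.g.
//   type Opts = RequiredPartial<{ a: string; b?: number }, 'a'>;
//     // => { a: string } & { b?: number }
//   type Stream = FlagOption<'stream', true>;    // { stream: true }
//   type NoStream = FlagOption<'stream', false>; // { stream?: false }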
--------------------------------------------------------------------------------
/.github/workflows/npm-publish.yml:
--------------------------------------------------------------------------------
name: Publish on Release

on:
  release:
    types: [created]

env:
  GENAI_DEFAULT_ENDPOINT: ${{ vars.GENAI_DEFAULT_ENDPOINT }}

jobs:
  publish-npm:
    runs-on: ubuntu-latest
    environment: production
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-node@v4
        with:
          node-version: 16
          registry-url: https://registry.npmjs.org/
      - run: yarn install --immutable
      - run: yarn npm publish --access public
        env:
          YARN_NPM_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
--------------------------------------------------------------------------------
/tsconfig.json:
--------------------------------------------------------------------------------
{
  "compilerOptions": {
    "target": "ES2022",
    "module": "Node16",
    "moduleResolution": "Node16",
    "lib": [
      "ES2022"
    ],
    "declaration": true,
    "noEmit": true,
    "esModuleInterop": true,
    "forceConsistentCasingInFileNames": true,
    "strict": true,
    "skipLibCheck": true,
    "resolveJsonModule": true,
    "types": [
      "vitest/globals",
      "node"
    ]
  },
  "include": [
    "src",
    "examples",
    "tests"
  ],
  "files": [
    "vite.config.ts"
  ],
  "ts-node": {
    "esm": true
  }
}
--------------------------------------------------------------------------------
/src/services/text/TextTokenizationService.ts:
--------------------------------------------------------------------------------
import { BaseService } from '../BaseService.js';
import { Options } from '../../client.js';
import { clientErrorWrapper } from '../../utils/errors.js';
import {
  TextTokenizationCreateInput,
  TextTokenizationCreateOutput,
} from '../../schema.js';

export class TextTokenizationService extends BaseService {
  create(
    input: TextTokenizationCreateInput,
    opts?: Options,
  ): Promise<TextTokenizationCreateOutput> {
    return clientErrorWrapper(
      this._client.POST('/v2/text/tokenization', {
        ...opts,
        params: { query: { version: '2024-01-10' } },
        body: input,
      }),
    );
  }
}
--------------------------------------------------------------------------------
/tsup.config.ts:
--------------------------------------------------------------------------------
import { defineConfig } from 'tsup';
import dotenv from 'dotenv-flow';
import pkgInfo from './package.json' assert { type: 'json' };

dotenv.config();

const GENAI_DEFAULT_ENDPOINT = process.env.GENAI_DEFAULT_ENDPOINT;
if (!GENAI_DEFAULT_ENDPOINT)
  throw new Error('Missing GENAI_DEFAULT_ENDPOINT env variable');

export default defineConfig({
  entry: ['src/index.ts', 'src/langchain/index.ts'],
  tsconfig: 'tsconfig.build.json',
  clean: true,
  dts: true,
  format: ['esm', 'cjs'],
  platform: 'node',
  shims: true,
  splitting: true,
  env: {
    NODE_ENV: 'production',
    GENAI_DEFAULT_ENDPOINT: GENAI_DEFAULT_ENDPOINT,
    VERSION: pkgInfo.version,
  },
});
--------------------------------------------------------------------------------
/examples/langchain/prompt-templates.ts:
--------------------------------------------------------------------------------
import { PromptTemplate } from '@langchain/core/prompts';

import { GenAIPromptTemplate } from '../../src/langchain/index.js';

{
  // Converting the LangChain Prompt Template (f-string) to GenAI Prompt Template
  const result = GenAIPromptTemplate.fromLangChain(
    PromptTemplate.fromTemplate(`Tell me a {adjective} joke about {content}.`),
  );
  console.log(result); // "Tell me a {{adjective}} joke about {{content}}."
}

{
  // Converting the GenAI Prompt Template to LangChain Prompt Template
  const result = GenAIPromptTemplate.toLangChain(
    `Tell me a {{adjective}} joke about {{content}}.`,
  );
  console.log(result); // "Tell me a {adjective} joke about {content}."
}
--------------------------------------------------------------------------------
/examples/generate.ts:
--------------------------------------------------------------------------------
import { Client } from '../src/index.js';

import { MODEL } from './shared/constants.js';

const client = new Client({
  apiKey: process.env.GENAI_API_KEY,
});

const input = { model_id: MODEL, input: 'How are you?' };

{
  const output = await client.text.generation.create(input);
  console.log(output);
}

{
  // Streaming (async iterators)
  const stream = await client.text.generation.create_stream(input);
  for await (const chunk of stream) {
    const result = chunk.results?.at(0);
    if (result) {
      console.log(result.stop_reason);
      console.log(result.generated_token_count);
      console.log(result.input_token_count);
      console.log(result.generated_text);
    }
  }
}
--------------------------------------------------------------------------------
/src/helpers/config.ts:
--------------------------------------------------------------------------------
import os from 'node:os';
import path from 'node:path';
import fs from 'node:fs';

import YAML from 'yaml';

export function lookupEndpoint(): string | null {
  return (
    process.env.GENAI_ENDPOINT || process.env.GENAI_DEFAULT_ENDPOINT || null
  );
}

export function lookupApiKey(): string | null {
  if (process.env.GENAI_API_KEY) {
    return process.env.GENAI_API_KEY;
  }

  const credentialsPath = path.join(os.homedir(), '.genai', 'credentials.yml');
  if (fs.existsSync(credentialsPath)) {
    try {
      const fileContent = fs.readFileSync(credentialsPath, 'utf8');
      return YAML.parse(fileContent).apiKey;
    } catch (err) {
      console.warn('Failed to read credentials');
    }
  }

  return null;
}
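
// The credentials file is expected to be plain YAML with a single `apiKey`
// entry (see config.test.ts), e.g.:
//
//   # ~/.genai/credentials.yml
//   apiKey: <your-api-key>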
--------------------------------------------------------------------------------
/examples/history.ts:
--------------------------------------------------------------------------------
import { Client } from '../src/index.js';

import { CHAT_MODEL } from './shared/constants.js';

const client = new Client({
  apiKey: process.env.GENAI_API_KEY,
});

{
  // List historical successful requests to the API
  const { results } = await client.request.list({
    origin: 'api',
    status: 'success',
  });
  for (const request of results) {
    console.log(request);
  }
}

{
  // List all requests related to a chat conversation
  const { conversation_id } = await client.text.chat.create({
    model_id: CHAT_MODEL,
    messages: [{ role: 'user', content: 'How are you?' }],
  });
  const { results } = await client.request.chat({
    conversation_id,
  });
  for (const request of results) {
    console.log(request);
  }
}
--------------------------------------------------------------------------------
/src/api/client.ts:
--------------------------------------------------------------------------------
import createClient, { FetchOptions, FetchResponse } from 'openapi-fetch';

import { FilterKeys } from '../utils/types.js';

import { components, paths } from './schema.js';

export type ApiClient = ReturnType<typeof createClient<paths>>;

export function createApiClient(
  ...params: Parameters<typeof createClient<paths>>
): ApiClient {
  return createClient<paths>(...params);
}

export type ApiClientOptions<
  Method extends keyof ApiClient,
  Path extends Parameters<ApiClient[Method]>[0],
> = FetchOptions<FilterKeys<paths[Path], Lowercase<Method>>>;

export type ApiClientResponse<
  Method extends keyof ApiClient,
  Path extends Parameters<ApiClient[Method]>[0] = Parameters<
    ApiClient[Method]
  >[0],
> = FetchResponse<FilterKeys<paths[Path], Lowercase<Method>>>;

export type ApiError = components['schemas']['BaseErrorResponse'];
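
// Usage sketch (aliases are illustrative): these helpers derive per-endpoint
// request and response types from the generated schema, e.g.
//   type ListModelsOptions = ApiClientOptions<'GET', '/v2/models'>;
//   type ListModelsResponse = ApiClientResponse<'GET', '/v2/models'>;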
--------------------------------------------------------------------------------
/src/errors.ts:
--------------------------------------------------------------------------------
import { AbortError as _AbortError } from 'p-queue-compat';

import type { ApiError } from './api/client.js';

export class BaseError extends Error {}

export class InvalidInputError extends BaseError {}

export class InternalError extends BaseError {}

export abstract class RequestError extends BaseError {}

export class NetworkError extends RequestError {}

export class HttpError extends RequestError implements ApiError {
  readonly error: ApiError['error'];
  readonly status_code: ApiError['status_code'];
  readonly extensions: ApiError['extensions'];

  constructor(error: ApiError) {
    super(error.message, { cause: error });
    this.error = error.error;
    this.status_code = error.status_code;
    this.extensions = error.extensions;
  }
}

// cjs compilation bug
export const AbortError: typeof _AbortError = _AbortError
  ? _AbortError
  : class AbortError extends Error {};
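
// Handling sketch: everything the SDK throws derives from BaseError, so
// callers can narrow from the most specific class down, e.g.
//   try {
//     await client.text.generation.create(input);
//   } catch (err) {
//     if (err instanceof HttpError) console.error(err.status_code, err.error);
//     else if (err instanceof NetworkError) console.error('connection failed');
//     else if (err instanceof BaseError) console.error('other SDK error');
//     else throw err;
//   }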
--------------------------------------------------------------------------------
/tests/e2e/services/FileService.test.ts:
--------------------------------------------------------------------------------
import { FileService } from '../../../src/services/FileService.js';
import { Client } from '../../../src/client.js';

describe('FileService', () => {
  let fileService: FileService;
  beforeAll(() => {
    fileService = new Client({
      endpoint: process.env.ENDPOINT,
      apiKey: process.env.API_KEY,
    }).file;
  });

  test('should upload, download and delete a file', async () => {
    const content = JSON.stringify('foobar');
    const filename = 'foobar.json';
    const type = 'application/json';
    const file = await fileService.create({
      purpose: 'template',
      file: {
        content: new Blob([content], { type }),
        name: filename,
      },
    });
    expect(file.result.file_name).toEqual(filename);
    const blob = await fileService.read({ id: file.result.id });
    expect(await blob.text()).toEqual(content);
    await expect(fileService.delete({ id: file.result.id })).toResolve();
  });
});
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2023 International Business Machines

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
/examples/langchain/llm-chat.ts:
--------------------------------------------------------------------------------
import { HumanMessage } from '@langchain/core/messages';

import { GenAIChatModel } from '../../src/langchain/llm-chat.js';
import { Client } from '../../src/index.js';

const makeClient = () =>
  new GenAIChatModel({
    model_id: 'meta-llama/llama-3-70b-instruct',
    client: new Client({
      endpoint: process.env.ENDPOINT,
      apiKey: process.env.API_KEY,
    }),
    parameters: {
      decoding_method: 'greedy',
      min_new_tokens: 1,
      max_new_tokens: 25,
      repetition_penalty: 1.5,
    },
  });

{
  // Basic
  const chat = makeClient();

  const response = await chat.invoke([
    new HumanMessage(
      'What is a good name for a company that makes colorful socks?',
    ),
  ]);

  console.log(response);
}

{
  // Streaming
  const chat = makeClient();

  await chat.invoke([new HumanMessage('Tell me a joke.')], {
    callbacks: [
      {
        handleLLMNewToken(token) {
          console.log(token);
        },
      },
    ],
  });
}
--------------------------------------------------------------------------------
/.eslintrc.cjs:
--------------------------------------------------------------------------------
module.exports = {
  root: true,
  env: {
    node: true,
    es2022: true,
  },
  extends: [
    'eslint:recommended',
    'plugin:@typescript-eslint/recommended',
    'plugin:import/recommended',
    'plugin:import/typescript',
    'prettier',
  ],
  overrides: [],
  parser: '@typescript-eslint/parser',
  parserOptions: {
    project: ['./tsconfig.json'],
    sourceType: 'module',
  },
  plugins: ['@typescript-eslint', 'import'],
  rules: {
    'arrow-body-style': ['error', 'as-needed'],
    'prefer-arrow-callback': 'off',
    '@typescript-eslint/no-explicit-any': 'off',
    'no-unused-vars': 'off',
    '@typescript-eslint/no-unused-vars': [
      'warn',
      {
        argsIgnorePattern: '^_',
        varsIgnorePattern: '^_',
        caughtErrorsIgnorePattern: '^_',
      },
    ],
    'import/order': [
      'error',
      {
        'newlines-between': 'always',
      },
    ],
  },
  settings: {
    'import/resolver': {
      typescript: true,
      node: true,
    },
  },
};
--------------------------------------------------------------------------------
/src/services/ModelService.ts:
--------------------------------------------------------------------------------
import { clientErrorWrapper } from '../utils/errors.js';
import { Options } from '../client.js';
import {
  ModelServiceListInput,
  ModelServiceListOutput,
  ModelServiceRetrieveInput,
  ModelServiceRetrieveOutput,
} from '../schema.js';

import { BaseService } from './BaseService.js';

export class ModelService extends BaseService {
  async list(
    input: ModelServiceListInput,
    opts?: Options,
  ): Promise<ModelServiceListOutput> {
    return await clientErrorWrapper(
      this._client.GET('/v2/models', {
        ...opts,
        params: {
          query: {
            ...input,
            version: '2023-11-22',
          },
        },
      }),
    );
  }

  async retrieve(
    input: ModelServiceRetrieveInput,
    opts?: Options,
  ): Promise<ModelServiceRetrieveOutput> {
    return clientErrorWrapper(
      this._client.GET('/v2/models/{id}', {
        ...opts,
        params: {
          path: input,
          query: {
            version: '2024-01-30',
          },
        },
      }),
    );
  }
}
--------------------------------------------------------------------------------
/examples/chat.ts:
--------------------------------------------------------------------------------
import { Client } from '../src/index.js';

import { CHAT_MODEL } from './shared/constants.js';

const client = new Client({
  apiKey: process.env.GENAI_API_KEY,
});

{
  // Start a conversation
  const { conversation_id, results: results1 } = await client.text.chat.create({
    model_id: CHAT_MODEL,
    messages: [
      {
        role: 'system',
        content: 'Answer yes or no',
      },
      {
        role: 'user',
        content: 'Hello, are you a robot?',
      },
    ],
  });
  console.log(results1[0]);

  // Continue the conversation
  const { results: results2 } = await client.text.chat.create({
    conversation_id,
    model_id: CHAT_MODEL,
    messages: [
      {
        role: 'user',
        content: 'Are you sure?',
      },
    ],
  });
  console.log(results2[0]);
}

{
  // Stream
  const stream = await client.text.chat.create_stream({
    model_id: CHAT_MODEL,
    messages: [{ role: 'user', content: 'How are you?' }],
  });
  for await (const chunk of stream) {
    console.log(chunk.results?.at(0)?.generated_text);
  }
}
--------------------------------------------------------------------------------
/src/services/text/TextEmbeddingService.ts:
--------------------------------------------------------------------------------
import { Options } from '../../client.js';
import { clientErrorWrapper } from '../../utils/errors.js';
import {
  TextEmbeddingCreateInput,
  TextEmbeddingCreateOutput,
} from '../../schema.js';
import { ApiClient } from '../../api/client.js';
import { SteamingApiClient } from '../../api/streaming-client.js';
import { ConcurrencyLimiter } from '../../utils/concurrency.js';
import { BaseService } from '../BaseService.js';

export class TextEmbeddingService extends BaseService {
  constructor(
    protected readonly _client: ApiClient,
    protected readonly _streamingClient: SteamingApiClient,
    protected readonly _limiter: ConcurrencyLimiter,
  ) {
    super(_client, _streamingClient);
  }

  create(
    input: TextEmbeddingCreateInput,
    opts?: Options,
  ): Promise<TextEmbeddingCreateOutput> {
    return this._limiter.execute(
      () =>
        clientErrorWrapper(
          this._client.POST('/v2/text/embeddings', {
            ...opts,
            params: { query: { version: '2024-04-15' } },
            body: input,
          }),
        ),
      { signal: opts?.signal },
    );
  }
}
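
// Usage sketch, assuming this service is exposed as `client.text.embedding`
// (mirroring the other text services); the exact input shape is defined by
// TextEmbeddingCreateInput:
//   const output = await client.text.embedding.create({
//     model_id: 'sentence-transformers/all-minilm-l6-v2',
//     input: ['Good morning'],
//   });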
--------------------------------------------------------------------------------
/src/langchain/prompt-template.test.ts:
--------------------------------------------------------------------------------
import { PromptTemplate } from '@langchain/core/prompts';

import { InvalidInputError } from '../errors.js';

import { GenAIPromptTemplate } from './prompt-template.js';

describe('Prompt Templates', () => {
  it('throws when unknown template type passed', () => {
    expect(() => {
      const template = PromptTemplate.fromTemplate(`Hello {name}`);
      (template.templateFormat as string) = 'unknown';
      GenAIPromptTemplate.fromLangChain(template);
    }).toThrow(InvalidInputError);
  });

  it('converts LangChain Prompt Template (f-string) to GenAI Prompt Template', () => {
    const result = GenAIPromptTemplate.fromLangChain(
      PromptTemplate.fromTemplate(
        `Tell me a {adjective} joke about {content}.`,
      ),
    );
    expect(result).toMatchInlineSnapshot(
      '"Tell me a {{adjective}} joke about {{content}}."',
    );
  });

  it('converts GenAI Prompt Template to LangChain Prompt Template', () => {
    const result = GenAIPromptTemplate.toLangChain(
      `Tell me a {{adjective}} joke about {{content}}.`,
    );
    expect(result.template).toMatchInlineSnapshot(
      '"Tell me a {adjective} joke about {content}."',
    );
  });
});
--------------------------------------------------------------------------------
/examples/file.ts:
--------------------------------------------------------------------------------
import { createReadStream } from 'node:fs';
import { blob } from 'node:stream/consumers';

import { Client } from '../src/index.js';

const client = new Client({
  apiKey: process.env.GENAI_API_KEY,
});

{
  // List all files
  let totalCount = Infinity;
  const limit = 100;
  for (let offset = 0; offset < totalCount; offset += limit) {
    const { results, total_count } = await client.file.list({
      limit,
      offset,
    });
    for (const file of results) {
      console.log(file);
    }
    totalCount = total_count;
  }
}

{
  // Upload a file
  const { result } = await client.file.create({
    purpose: 'tune',
    file: {
      name: 'tune_input.jsonl',
      content: (await blob(
        createReadStream('examples/assets/tune_input.jsonl'),
      )) as any,
    },
  });
  console.log(result);

  // Show details of a file
  const file = await client.file.retrieve({ id: result.id });
  console.log(file);

  // Download the file's content
  const content = await client.file.read({ id: result.id });
  console.log(await content.text());

  // Delete the file
  await client.file.delete({ id: result.id });
}
--------------------------------------------------------------------------------
/src/langchain/prompt-template.ts:
--------------------------------------------------------------------------------
import { PromptTemplate as LangChainPromptTemplate } from '@langchain/core/prompts';

import { InvalidInputError } from '../errors.js';

export class GenAIPromptTemplate {
  static toLangChain(body: string): LangChainPromptTemplate {
    const fString = body.replace(
      GenAIPromptTemplate.getTemplateMatcher('mustache'),
      '{$1}',
    );
    return LangChainPromptTemplate.fromTemplate(fString, {
      templateFormat: 'f-string',
      validateTemplate: true,
    });
  }

  static fromLangChain(template: LangChainPromptTemplate): string {
    if (typeof template.template !== 'string')
      throw new Error('Unsupported template type');

    return template.template.replace(
      GenAIPromptTemplate.getTemplateMatcher(template.templateFormat),
      '{{$1}}',
    );
  }

  private static getTemplateMatcher(name: string) {
    switch (name) {
      case 'mustache':
        return /\{\{([^}]+)\}\}/g;
      case 'jinja2':
        return /\{\{\s*(.*?)\s*\}\}/g;
      case 'fstring':
      case 'f-string':
        return /\{([^}]+)\}/g;
      default: {
        throw new InvalidInputError(`Unknown template format "${name}".`);
      }
    }
  }
}
--------------------------------------------------------------------------------
/src/services/text/TextSentenceSimilarityService.ts:
--------------------------------------------------------------------------------
import { Options } from '../../client.js';
import { clientErrorWrapper } from '../../utils/errors.js';
import {
  TextSentenceSimilarityCreateInput,
  TextSentenceSimilarityCreateOutput,
} from '../../schema.js';
import { ApiClient } from '../../api/client.js';
import { SteamingApiClient } from '../../api/streaming-client.js';
import { ConcurrencyLimiter } from '../../utils/concurrency.js';
import { BaseService } from '../BaseService.js';

export class TextSentenceSimilarityService extends BaseService {
  constructor(
    protected readonly _client: ApiClient,
    protected readonly _streamingClient: SteamingApiClient,
    protected readonly _limiter: ConcurrencyLimiter,
  ) {
    super(_client, _streamingClient);
  }

  create(
    input: TextSentenceSimilarityCreateInput,
    opts?: Options,
  ): Promise<TextSentenceSimilarityCreateOutput> {
    return this._limiter.execute(
      () =>
        clientErrorWrapper(
          this._client.POST('/v2/beta/text/sentence-similarity', {
            ...opts,
            params: { query: { version: '2023-11-22' } },
            body: input,
          }),
        ),
      { signal: opts?.signal },
    );
  }
}
--------------------------------------------------------------------------------
/src/utils/concurrency.ts:
--------------------------------------------------------------------------------
import PQueue, { QueueAddOptions } from 'p-queue-compat';

import { HttpError } from '../errors.js';

type Limiter = () => Promise<{ limit: number }>;
type Task<R> = () => Promise<R>;

function isConcurrencyLimitError(err: unknown): err is HttpError {
  return (
    err instanceof HttpError && err.extensions?.code === 'TOO_MANY_REQUESTS'
  );
}

export class ConcurrencyLimiter {
  private _queue?: PQueue;
  private _limiterPromise?: ReturnType<Limiter>;

  constructor(private readonly limiter: Limiter) {}

  async execute<R>(task: Task<R>, options?: Partial<QueueAddOptions>) {
    await this._initQueue();
    // eslint-disable-next-line no-constant-condition
    while (true) {
      try {
        return await this._queue!.add(task, {
          ...options,
          throwOnTimeout: true,
        });
      } catch (err) {
        if (isConcurrencyLimitError(err)) continue;
        throw err;
      }
    }
  }

  protected async _initQueue(): Promise<void> {
    if (this._queue) return;
    if (this._limiterPromise) {
      await this._limiterPromise;
      return;
    }

    this._limiterPromise = this.limiter();
    const { limit } = await this._limiterPromise;
    this._queue = new PQueue({ concurrency: limit });
  }
}
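
// Usage sketch (limit value and task name illustrative): the queue is sized
// lazily from the first limiter() call, and tasks rejected with
// TOO_MANY_REQUESTS are retried transparently:
//   const limiter = new ConcurrencyLimiter(async () => ({ limit: 4 }));
//   const data = await limiter.execute(() => doRequest(), { signal });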
--------------------------------------------------------------------------------
/src/utils/stream.ts:
--------------------------------------------------------------------------------
import { Readable } from 'stream';

export class TypedReadable<T> extends Readable {
  // eslint-disable-next-line @typescript-eslint/no-unused-vars
  _read(size: number) {
    /* empty */
  }

  addListener(event: 'close', listener: () => void): this;
  addListener(event: 'data', listener: (chunk: T) => void): this;
  addListener(event: 'end', listener: () => void): this;
  addListener(event: 'error', listener: (err: Error) => void): this;
  addListener(event: 'pause', listener: () => void): this;
  addListener(event: 'readable', listener: () => void): this;
  addListener(event: 'resume', listener: () => void): this;
  addListener(
    event: string | symbol,
    listener: (...args: any[]) => void,
  ): this {
    return super.addListener(event, listener);
  }

  on(event: 'close', listener: () => void): this;
  on(event: 'data', listener: (chunk: T) => void): this;
  on(event: 'end', listener: () => void): this;
  on(event: 'error', listener: (err: Error) => void): this;
  on(event: 'pause', listener: () => void): this;
  on(event: 'readable', listener: () => void): this;
  on(event: 'resume', listener: () => void): this;
  on(event: string | symbol, listener: (...args: any[]) => void): this {
    return super.on(event, listener);
  }

  [Symbol.asyncIterator](): AsyncIterableIterator<T> {
    return super[Symbol.asyncIterator]();
  }
}
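
// Usage sketch (chunk type illustrative): the overloads above give consumers
// typed chunks from both event listeners and async iteration:
//   const stream = new TypedReadable<{ token: string }>({ objectMode: true });
//   stream.on('data', (chunk) => console.log(chunk.token));
//   for await (const chunk of stream) console.log(chunk.token);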
--------------------------------------------------------------------------------
/src/services/RequestService.ts:
--------------------------------------------------------------------------------
import { clientErrorWrapper } from '../utils/errors.js';
import { Options } from '../client.js';
import {
  RequestServiceChatInput,
  RequestServiceChatOutput,
  RequestServiceDeleteInput,
  RequestServiceDeleteOutput,
  RequestServiceListInput,
  RequestServiceListOutput,
} from '../schema.js';

import { BaseService } from './BaseService.js';

export class RequestService extends BaseService {
  async list(
    input: RequestServiceListInput,
    opts?: Options,
  ): Promise<RequestServiceListOutput> {
    return clientErrorWrapper(
      this._client.GET('/v2/requests', {
        ...opts,
        params: { query: { ...input, version: '2023-11-22' } },
      }),
    );
  }

  async delete(
    input: RequestServiceDeleteInput,
    opts?: Options,
  ): Promise<RequestServiceDeleteOutput> {
    return clientErrorWrapper(
      this._client.DELETE('/v2/requests/{id}', {
        ...opts,
        params: { path: input, query: { version: '2023-11-22' } },
      }),
    );
  }

  async chat(
    input: RequestServiceChatInput,
    opts?: Options,
  ): Promise<RequestServiceChatOutput> {
    return clientErrorWrapper(
      this._client.GET('/v2/requests/chat/{conversation_id}', {
        ...opts,
        params: { path: input, query: { version: '2024-03-19' } },
      }),
    );
  }
}
--------------------------------------------------------------------------------
/src/utils/errors.ts:
--------------------------------------------------------------------------------
import { FetchResponse } from 'openapi-fetch';

import {
  HttpError,
  InternalError,
  NetworkError,
  AbortError,
} from '../errors.js';

export function isAbortError(err: unknown): err is DOMException {
  return Boolean(
    err &&
      err instanceof Error &&
      (err.name === 'AbortError' ||
        err instanceof AbortError ||
        (err as any).code === 'ABORT_ERR'),
  );
}

const ServiceUnavailableErrorCodes = new Set([
  'ENOTFOUND',
  'ETIMEDOUT',
  'ECONNRESET',
  'EHOSTDOWN',
  'ECONNREFUSED',
  'ENETUNREACH', // macOS
  'EHOSTUNREACH', // Linux
  'UND_ERR_CONNECT_TIMEOUT',
  'EAI_AGAIN',
]);
function isServiceError(err: unknown) {
  const code = (err as any)?.code;
  return !!code && ServiceUnavailableErrorCodes.has(code);
}

export async function clientErrorWrapper<T>(
  request: Promise<FetchResponse<T>>,
): Promise<Exclude<FetchResponse<T>, { data?: never }>['data']> {
  try {
    const response = await request;
    if (response.error != undefined) {
      throw new HttpError(
        response.error as Exclude<typeof response.error, undefined>,
      );
    }
    return response.data!;
  } catch (err) {
    if (err instanceof HttpError) throw err;
    if (isAbortError(err)) throw err;
    if (isServiceError(err))
      throw new NetworkError('Unable to connect', { cause: err });
    throw new InternalError('Request failed', { cause: err });
  }
}
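
// Mapping sketch: an error body becomes HttpError, connection-level failures
// (codes listed above) become NetworkError, user aborts are re-thrown as-is,
// and anything else is wrapped in InternalError:
//   const data = await clientErrorWrapper(
//     client.GET('/v2/models', { params: { query: { version: '2023-11-22' } } }),
//   );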
--------------------------------------------------------------------------------
/examples/langchain/llm.ts:
--------------------------------------------------------------------------------
import { Client } from '../../src/index.js';
import { GenAIModel } from '../../src/langchain/index.js';

const makeClient = () =>
  new GenAIModel({
    model_id: 'google/flan-t5-xl',
    client: new Client({
      endpoint: process.env.ENDPOINT,
      apiKey: process.env.API_KEY,
    }),
    parameters: {
      decoding_method: 'greedy',
      min_new_tokens: 5,
      max_new_tokens: 25,
      repetition_penalty: 1.5,
    },
    moderations: {
      hap: {
        input: {
          enabled: true,
          threshold: 0.75,
        },
        output: {
          enabled: true,
          threshold: 0.75,
        },
      },
    },
  });

{
  // Basic
  console.info('---Single Input Example---');
  const model = makeClient();

  const prompt = 'What is a good name for a company that makes colorful socks?';
  console.info(`Request: ${prompt}`);
  const response = await model.invoke(prompt);
  console.log(`Response: ${response}`);
}

{
  console.info('---Multiple Inputs Example---');
  const model = makeClient();

  const prompts = ['What is IBM?', 'What is WatsonX?'];
  console.info('Request prompts:', prompts);
  const response = await model.generate(prompts);
  console.info('Response:', response);
}

{
  console.info('---Streaming Example---');
  const chat = makeClient();

  const prompt = 'What is a molecule?';
  console.info(`Request: ${prompt}`);
  for await (const token of await chat.stream(prompt)) {
    console.info(`Received token: ${token}`);
  }
}
--------------------------------------------------------------------------------
/tests/integration/errors.test.ts:
--------------------------------------------------------------------------------
import fetch from 'cross-fetch';

import { createApiClient } from '../../src/api/client.js';
import { BaseError, HttpError, NetworkError } from '../../src/errors.js';
import { clientErrorWrapper } from '../../src/utils/errors.js';
import { MOCK_ENDPOINT } from '../mocks/handlers.js';

describe('errors', () => {
  test('should fail with network error', async () => {
    const client = createApiClient({ baseUrl: 'http://invalidhost', fetch });
    await expect(
      clientErrorWrapper(
        client.GET('/v2/models', {
          params: { query: { limit: 100, offset: 0, version: '2023-11-22' } },
        }),
      ),
    ).rejects.toBeInstanceOf(NetworkError);
  });

  test('should fail with http error', async () => {
    const client = createApiClient({ baseUrl: MOCK_ENDPOINT, fetch });
    await expect(
      clientErrorWrapper(
        client.GET('/error' as '/v2/models', {
          params: { query: { limit: 100, offset: 0, version: '2023-11-22' } },
        }),
      ),
    ).rejects.toBeInstanceOf(HttpError);
  });

  test('should fail with abort error', async () => {
    const client = createApiClient({ baseUrl: MOCK_ENDPOINT, fetch });
    const controller = new AbortController();
    controller.abort();

    const promise = clientErrorWrapper(
      client.GET('/v2/models', {
        params: { query: { limit: 100, offset: 0, version: '2023-11-22' } },
        signal: controller.signal,
      }),
    );

    await expect(promise).rejects.toThrowError('The user aborted a request.');
    await expect(promise).rejects.not.toBeInstanceOf(BaseError);
  });
});
--------------------------------------------------------------------------------
/src/services/text/TextChatService.ts:
--------------------------------------------------------------------------------
import { Options } from '../../client.js';
import { clientErrorWrapper } from '../../utils/errors.js';
import {
  TextChatCreateInput,
  TextChatCreateOutput,
  TextChatCreateStreamInput,
  TextChatCreateStreamOutput,
} from '../../schema.js';
import { TypedReadable } from '../../utils/stream.js';
import { BaseService } from '../BaseService.js';
import { ApiClient } from '../../api/client.js';
import { SteamingApiClient } from '../../api/streaming-client.js';
import { ConcurrencyLimiter } from '../../utils/concurrency.js';

export class TextChatService extends BaseService {
  constructor(
    protected readonly _client: ApiClient,
    protected readonly _streamingClient: SteamingApiClient,
    protected readonly _limiter: ConcurrencyLimiter,
  ) {
    super(_client, _streamingClient);
  }

  create(
    input: TextChatCreateInput,
    opts?: Options,
  ): Promise<TextChatCreateOutput> {
    return this._limiter.execute(
      () =>
        clientErrorWrapper(
          this._client.POST('/v2/text/chat', {
            ...opts,
            params: { query: { version: '2024-03-19' } },
            body: input,
          }),
        ),
      { signal: opts?.signal },
    );
  }

  create_stream(
    input: TextChatCreateStreamInput,
    opts?: Options,
  ): Promise<TypedReadable<TextChatCreateStreamOutput>> {
    return this._limiter.execute(
      async () =>
        this._streamingClient.stream({
          url: '/v2/text/chat_stream?version=2024-03-19',
          body: input,
          signal: opts?.signal,
        }),
      { signal: opts?.signal },
    );
  }
}
--------------------------------------------------------------------------------
/src/services/text/TextGenerationService.ts:
--------------------------------------------------------------------------------
import { Options } from '../../client.js';
import { clientErrorWrapper } from '../../utils/errors.js';
import {
  TextGenerationCreateInput,
  TextGenerationCreateOutput,
  TextGenerationCreateStreamInput,
  TextGenerationCreateStreamOutput,
} from '../../schema.js';
import { TypedReadable } from '../../utils/stream.js';
import { ApiClient } from '../../api/client.js';
import { SteamingApiClient } from '../../api/streaming-client.js';
import { ConcurrencyLimiter } from '../../utils/concurrency.js';
import { BaseService } from '../BaseService.js';

export class TextGenerationService extends BaseService {
  constructor(
    protected readonly _client: ApiClient,
    protected readonly _streamingClient: SteamingApiClient,
    protected readonly _limiter: ConcurrencyLimiter,
  ) {
    super(_client, _streamingClient);
  }

  async create(
    input: TextGenerationCreateInput,
    opts?: Options,
  ): Promise<TextGenerationCreateOutput> {
    return this._limiter.execute(
      () =>
        clientErrorWrapper(
          this._client.POST('/v2/text/generation', {
            ...opts,
            params: { query: { version: '2024-03-19' } },
            body: input,
          }),
        ),
      { signal: opts?.signal },
    );
  }

  create_stream(
    input: TextGenerationCreateStreamInput,
    opts?: Options,
  ): Promise<TypedReadable<TextGenerationCreateStreamOutput>> {
    return this._limiter.execute(
      async () =>
        this._streamingClient.stream({
          url: '/v2/text/generation_stream?version=2024-03-19',
          body: input,
          signal: opts?.signal,
        }),
      { signal: opts?.signal },
    );
  }
}
--------------------------------------------------------------------------------
/examples/tune.ts:
--------------------------------------------------------------------------------
import { blob } from 'node:stream/consumers';
import { createReadStream } from 'node:fs';

import { Client } from '../src/index.js';

const client = new Client({
  apiKey: process.env.GENAI_API_KEY,
});

{
  // List all completed tunes
  let totalCount = Infinity;
  const limit = 100;
  for (let offset = 0; offset < totalCount; offset += limit) {
    const { results, total_count } = await client.tune.list({
      limit,
      offset,
      status: 'completed',
    });
    for (const file of results) {
      console.log(file);
    }
    totalCount = total_count;
  }
}

{
  // List available tune methods
  const { results: tuneTypes } = await client.tune.types({});
  console.log(tuneTypes);

  // Upload file for tuning
  const { result: file } = await client.file.create({
    purpose: 'tune',
    file: {
      name: 'tune_input.jsonl',
      content: (await blob(
        createReadStream('examples/assets/tune_input.jsonl'),
      )) as any,
    },
  });

  // Create a tune
  const { result: createdTune } = await client.tune.create({
    name: 'Awesome Tune',
    tuning_type: 'prompt_tuning',
    model_id: 'google/flan-t5-xl',
    task_id: 'generation',
    training_file_ids: [file.id],
  });
  console.log(createdTune);

  // Show details of the tune
  const { result: retrievedTune } = await client.tune.retrieve({
    id: createdTune.id,
  });
  console.log(retrievedTune);

  // Download tune's assets when completed
  if (retrievedTune.status === 'completed') {
    const logs = await client.tune.read({ id: retrievedTune.id, type: 'logs' });
    console.log(await logs.text());
  }

  // Delete the tune
  await client.tune.delete({ id: createdTune.id });

  // Delete the file
  await client.file.delete({ id: file.id });
}
--------------------------------------------------------------------------------
/src/helpers/config.test.ts:
--------------------------------------------------------------------------------
import fs from 'fs';
import path from 'path';
import os from 'os';

import { lookupApiKey, lookupEndpoint } from './config.js';

vi.mock('fs');

describe('Helpers', () => {
  afterAll(() => {
    vi.unstubAllEnvs();
  });

  describe('endpointLookup', () => {
    const EXPECTED_ENDPOINT = 'https://foobar';

    afterEach(() => {
      vi.stubEnv('GENAI_ENDPOINT', '');
    });

    test('should read endpoint from the env variable', () => {
      process.env.GENAI_ENDPOINT = EXPECTED_ENDPOINT;
      const endpoint = lookupEndpoint();
      expect(endpoint).toEqual(EXPECTED_ENDPOINT);
    });

    test('should use default endpoint', () => {
      const endpoint = lookupEndpoint();
      expect(endpoint).toEqual(process.env.GENAI_DEFAULT_ENDPOINT);
    });
  });

  describe('apiKeyLookup', () => {
    const EXPECTED_API_KEY = 'foobar';

    afterEach(() => {
      vi.stubEnv('GENAI_API_KEY', '');
    });

    test('should read apiKey from the env variable', () => {
      process.env.GENAI_API_KEY = EXPECTED_API_KEY;
      const apiKey = lookupApiKey();
      expect(apiKey).toEqual(EXPECTED_API_KEY);
    });

    test('should read apiKey from the user configuration', () => {
      delete process.env.GENAI_API_KEY;
      const expectedPath = path.join(os.homedir(), '.genai', 'credentials.yml');
      vi.spyOn(fs, 'existsSync').mockReturnValueOnce(true);
      const spy = vi
        .spyOn(fs, 'readFileSync')
        .mockReturnValueOnce(`apiKey: ${EXPECTED_API_KEY}`);
      const apiKey = lookupApiKey();
      expect(spy).toBeCalledWith(expectedPath, 'utf8');
      expect(apiKey).toEqual(EXPECTED_API_KEY);
    });

    test('should not read anything', () => {
      const apiKey = lookupApiKey();
      expect(apiKey).toEqual(null);
    });
  });
});
--------------------------------------------------------------------------------
/src/client.test.ts:
--------------------------------------------------------------------------------
import { lookupApiKey, lookupEndpoint } from './helpers/config.js';
import { Client } from './client.js';

vi.mock('./helpers/config.js');

describe('client', () => {
  describe('configuration', () => {
    beforeEach(() => {
      vi.resetAllMocks();
    });

    test('should find an endpoint if one is not provided', () => {
      vi.mocked(lookupEndpoint).mockReturnValueOnce('https://foobar');
      const client = new Client({
        apiKey: process.env.API_KEY,
      });
      expect(lookupEndpoint).toHaveBeenCalled();
      expect(client).toBeDefined();
    });

    test("should throw if endpoint is not provided and can't be found", () => {
      vi.mocked(lookupEndpoint).mockReturnValueOnce(null);
      expect(() => new Client({ apiKey: process.env.API_KEY })).toThrowError(
        'endpoint is missing',
      );
      expect(lookupEndpoint).toHaveBeenCalled();
    });

    test('should find an api key if one is not provided', () => {
      vi.mocked(lookupApiKey).mockReturnValueOnce('foobar');
      const client = new Client({
        endpoint: process.env.ENDPOINT,
      });
      expect(lookupApiKey).toHaveBeenCalled();
      expect(client).toBeDefined();
    });

    test("should throw if api key is not provided and can't be found", () => {
      vi.mocked(lookupApiKey).mockReturnValueOnce(null);
      expect(() => new Client({ endpoint: process.env.ENDPOINT })).toThrowError(
        'API key is missing',
      );
      expect(lookupApiKey).toHaveBeenCalled();
    });

    test('should pass if all required configurations are provided', () => {
      const client = new Client({
        endpoint: process.env.ENDPOINT,
        apiKey: process.env.API_KEY,
      });
      expect(lookupEndpoint).not.toHaveBeenCalled();
      expect(lookupApiKey).not.toHaveBeenCalled();
      expect(client).toBeDefined();
    });
  });
});
--------------------------------------------------------------------------------
'../client.js'; 2 | import { clientErrorWrapper } from '../utils/errors.js'; 3 | import { 4 | UserServiceCreateInput, 5 | UserServiceCreateOutput, 6 | UserServiceDeleteInput, 7 | UserServiceDeleteOutput, 8 | UserServiceRetrieveInput, 9 | UserServiceRetrieveOutput, 10 | UserServiceUpdateInput, 11 | UserServiceUpdateOutput, 12 | } from '../schema.js'; 13 | 14 | import { BaseService } from './BaseService.js'; 15 | 16 | export class UserService extends BaseService { 17 | async create( 18 | input: UserServiceCreateInput, 19 | opts?: Options, 20 | ): Promise<UserServiceCreateOutput> { 21 | return clientErrorWrapper( 22 | this._client.POST('/v2/user', { 23 | ...opts, 24 | body: input, 25 | params: { 26 | query: { 27 | version: '2023-11-22', 28 | }, 29 | }, 30 | }), 31 | ); 32 | } 33 | 34 | async retrieve( 35 | input: UserServiceRetrieveInput, 36 | opts?: Options, 37 | ): Promise<UserServiceRetrieveOutput> { 38 | return clientErrorWrapper( 39 | this._client.GET('/v2/user', { 40 | ...opts, 41 | params: { 42 | query: { 43 | version: '2023-11-22', 44 | }, 45 | }, 46 | }), 47 | ); 48 | } 49 | 50 | async update( 51 | input: UserServiceUpdateInput, 52 | opts?: Options, 53 | ): Promise<UserServiceUpdateOutput> { 54 | return clientErrorWrapper( 55 | this._client.PATCH('/v2/user', { 56 | ...opts, 57 | params: { 58 | query: { 59 | version: '2023-11-22', 60 | }, 61 | }, 62 | body: input, 63 | }), 64 | ); 65 | } 66 | 67 | async delete( 68 | input: UserServiceDeleteInput, 69 | opts?: Options, 70 | ): Promise<UserServiceDeleteOutput> { 71 | return clientErrorWrapper( 72 | this._client.DELETE('/v2/user', { 73 | ...opts, 74 | params: { 75 | query: { 76 | version: '2023-11-22', 77 | }, 78 | }, 79 | }), 80 | ); 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /src/services/PromptService.ts: -------------------------------------------------------------------------------- 1 | import { Options } from '../client.js'; 2 | import { clientErrorWrapper } from '../utils/errors.js'; 3 | import { 4 | PromptServiceCreateInput, 5 | PromptServiceCreateOutput, 6 | PromptServiceDeleteInput, 7 | PromptServiceDeleteOutput, 8 | PromptServiceListInput, 9 | PromptServiceListOutput, 10 | PromptServiceRetrieveInput, 11 | PromptServiceRetrieveOutput, 12 | } from '../schema.js'; 13 | 14 | import { BaseService } from './BaseService.js'; 15 | 16 | export class PromptService extends BaseService { 17 | async list( 18 | input: PromptServiceListInput, 19 | opts?: Options, 20 | ): Promise<PromptServiceListOutput> { 21 | return clientErrorWrapper( 22 | this._client.GET('/v2/prompts', { 23 | ...opts, 24 | params: { 25 | query: { 26 | ...input, 27 | version: '2024-03-19', 28 | }, 29 | }, 30 | }), 31 | ); 32 | } 33 | 34 | async retrieve( 35 | input: PromptServiceRetrieveInput, 36 | opts?: Options, 37 | ): Promise<PromptServiceRetrieveOutput> { 38 | return clientErrorWrapper( 39 | this._client.GET('/v2/prompts/{id}', { 40 | ...opts, 41 | params: { 42 | query: { 43 | version: '2024-03-19', 44 | }, 45 | path: input, 46 | }, 47 | }), 48 | ); 49 | } 50 | 51 | async create( 52 | input: PromptServiceCreateInput, 53 | opts?: Options, 54 | ): Promise<PromptServiceCreateOutput> { 55 | return clientErrorWrapper( 56 | this._client.POST('/v2/prompts', { 57 | ...opts, 58 | body: input, 59 | params: { 60 | query: { 61 | version: '2024-03-19', 62 | }, 63 | }, 64 | }), 65 | ); 66 | } 67 | 68 | async delete( 69 | input: PromptServiceDeleteInput, 70 | opts?: Options, 71 | ): Promise<PromptServiceDeleteOutput> { 72 | return clientErrorWrapper( 73 | this._client.DELETE('/v2/prompts/{id}', { 74 | ...opts, 75 | params: { 76 | query: { 77 | version: '2023-11-22', 78 | }, 79 | path: input, 80 | }, 81 | }), 82 | ); 83 | } 84 | } 85
| -------------------------------------------------------------------------------- /src/services/text/TextService.ts: -------------------------------------------------------------------------------- 1 | import { BaseService } from '../BaseService.js'; 2 | import { ApiClient } from '../../api/client.js'; 3 | import { SteamingApiClient } from '../../api/streaming-client.js'; 4 | import { ConcurrencyLimiter } from '../../utils/concurrency.js'; 5 | import { clientErrorWrapper } from '../../utils/errors.js'; 6 | 7 | import { TextGenerationService } from './TextGenerationService.js'; 8 | import { TextTokenizationService } from './TextTokenizationService.js'; 9 | import { TextEmbeddingService } from './TextEmbeddingService.js'; 10 | import { TextChatService } from './TextChatService.js'; 11 | import { TextSentenceSimilarityService } from './TextSentenceSimilarityService.js'; 12 | 13 | export class TextService extends BaseService { 14 | public readonly generation: TextGenerationService; 15 | public readonly tokenization: TextTokenizationService; 16 | public readonly embedding: TextEmbeddingService; 17 | public readonly chat: TextChatService; 18 | public readonly experimental: { 19 | sentenceSimilarity: TextSentenceSimilarityService; 20 | }; 21 | 22 | constructor(client: ApiClient, streamingClient: SteamingApiClient) { 23 | super(client, streamingClient); 24 | 25 | const generationLimiter = new ConcurrencyLimiter(async () => { 26 | const { 27 | result: { concurrency }, 28 | } = await clientErrorWrapper( 29 | this._client.GET('/v2/text/generation/limits', { 30 | params: { query: { version: '2023-11-22' } }, 31 | }), 32 | ); 33 | return concurrency; 34 | }); 35 | const embeddingLimiter = new ConcurrencyLimiter(async () => { 36 | const { 37 | result: { concurrency }, 38 | } = await clientErrorWrapper( 39 | this._client.GET('/v2/text/embeddings/limits', { 40 | params: { query: { version: '2023-11-22' } }, 41 | }), 42 | ); 43 | return concurrency; 44 | }); 45 | 46 | this.generation = new TextGenerationService( 47 | client, 48 | streamingClient, 49 | generationLimiter, 50 | ); 51 | this.tokenization = new TextTokenizationService(client, streamingClient); 52 | this.embedding = new TextEmbeddingService( 53 | client, 54 | streamingClient, 55 | embeddingLimiter, 56 | ); 57 | this.chat = new TextChatService(client, streamingClient, generationLimiter); 58 | this.experimental = { 59 | sentenceSimilarity: new TextSentenceSimilarityService( 60 | client, 61 | streamingClient, 62 | embeddingLimiter, 63 | ), 64 | }; 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /.github/workflows/node.js.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push: 5 | branches: ['main'] 6 | pull_request: 7 | branches: ['main'] 8 | 9 | env: 10 | GENAI_DEFAULT_ENDPOINT: ${{ vars.GENAI_DEFAULT_ENDPOINT }} 11 | 12 | jobs: 13 | lint: 14 | runs-on: ubuntu-latest 15 | 16 | strategy: 17 | matrix: 18 | node-version: [18.x, 20.x] 19 | # See supported Node.js release schedule at https://nodejs.org/en/about/releases/ 20 | 21 | steps: 22 | - uses: actions/checkout@v4 23 | - name: Use Node.js ${{ matrix.node-version }} 24 | uses: actions/setup-node@v4 25 | with: 26 | node-version: ${{ matrix.node-version }} 27 | cache: 'yarn' 28 | - run: yarn --frozen-lockfile 29 | - run: yarn lint 30 | 31 | test: 32 | runs-on: ubuntu-latest 33 | 34 | env: 35 | API_KEY: ${{ secrets.TEST_API_KEY }} 36 | ENDPOINT: ${{ vars.TEST_ENDPOINT }} 37 | 38 | 
strategy: 39 | matrix: 40 | node-version: [18.x] 41 | # See supported Node.js release schedule at https://nodejs.org/en/about/releases/ 42 | 43 | steps: 44 | - uses: actions/checkout@v4 45 | - name: Use Node.js ${{ matrix.node-version }} 46 | uses: actions/setup-node@v4 47 | with: 48 | node-version: ${{ matrix.node-version }} 49 | cache: 'yarn' 50 | - run: yarn --frozen-lockfile 51 | - run: yarn test 52 | 53 | examples: 54 | runs-on: ubuntu-latest 55 | 56 | env: 57 | GENAI_API_KEY: ${{ secrets.TEST_API_KEY }} 58 | 59 | strategy: 60 | matrix: 61 | node-version: [18.18.x] 62 | # See supported Node.js release schedule at https://nodejs.org/en/about/releases/ 63 | 64 | steps: 65 | - uses: actions/checkout@v4 66 | - name: Use Node.js ${{ matrix.node-version }} 67 | uses: actions/setup-node@v4 68 | with: 69 | node-version: ${{ matrix.node-version }} 70 | cache: 'yarn' 71 | - run: yarn --frozen-lockfile 72 | - run: yarn examples 73 | 74 | build: 75 | runs-on: ubuntu-latest 76 | 77 | strategy: 78 | matrix: 79 | node-version: [18.x, 20.x] 80 | # See supported Node.js release schedule at https://nodejs.org/en/about/releases/ 81 | 82 | steps: 83 | - uses: actions/checkout@v4 84 | - name: Use Node.js ${{ matrix.node-version }} 85 | uses: actions/setup-node@v4 86 | with: 87 | node-version: ${{ matrix.node-version }} 88 | cache: 'yarn' 89 | - run: yarn --frozen-lockfile 90 | - run: yarn build 91 | -------------------------------------------------------------------------------- /src/services/TuneService.ts: -------------------------------------------------------------------------------- 1 | import { clientErrorWrapper } from '../utils/errors.js'; 2 | import { Options } from '../client.js'; 3 | import { 4 | TuneServiceCreateInput, 5 | TuneServiceCreateOutput, 6 | TuneServiceDeleteInput, 7 | TuneServiceDeleteOutput, 8 | TuneServiceListInput, 9 | TuneServiceListOutput, 10 | TuneServiceReadInput, 11 | TuneServiceReadOutput, 12 | TuneServiceRetrieveInput, 13 | TuneServiceRetrieveOutput, 14 | TuneServiceTypesInput, 15 | TuneServiceTypesOutput, 16 | } from '../schema.js'; 17 | 18 | import { BaseService } from './BaseService.js'; 19 | 20 | export class TuneService extends BaseService { 21 | async create( 22 | input: TuneServiceCreateInput, 23 | opts?: Options, 24 | ): Promise<TuneServiceCreateOutput> { 25 | return clientErrorWrapper( 26 | this._client.POST('/v2/tunes', { 27 | ...opts, 28 | params: { query: { version: '2023-11-22' } }, 29 | body: input, 30 | }), 31 | ); 32 | } 33 | 34 | async read( 35 | input: TuneServiceReadInput, 36 | opts?: Options, 37 | ): Promise<TuneServiceReadOutput> { 38 | return clientErrorWrapper( 39 | this._client.GET('/v2/tunes/{id}/content/{type}', { 40 | ...opts, 41 | params: { path: input, query: { version: '2023-12-15' } }, 42 | parseAs: 'blob', 43 | }), 44 | ); 45 | } 46 | 47 | async retrieve( 48 | input: TuneServiceRetrieveInput, 49 | opts?: Options, 50 | ): Promise<TuneServiceRetrieveOutput> { 51 | return clientErrorWrapper( 52 | this._client.GET('/v2/tunes/{id}', { 53 | ...opts, 54 | params: { path: input, query: { version: '2023-11-22' } }, 55 | }), 56 | ); 57 | } 58 | 59 | async delete( 60 | input: TuneServiceDeleteInput, 61 | opts?: Options, 62 | ): Promise<TuneServiceDeleteOutput> { 63 | return clientErrorWrapper( 64 | this._client.DELETE('/v2/tunes/{id}', { 65 | ...opts, 66 | params: { path: input, query: { version: '2023-11-22' } }, 67 | }), 68 | ); 69 | } 70 | 71 | async list( 72 | input: TuneServiceListInput, 73 | opts?: Options, 74 | ): Promise<TuneServiceListOutput> { 75 | return clientErrorWrapper( 76 | this._client.GET('/v2/tunes', { 77 | ...opts, 78 | params: { query: { ...input,
version: '2023-11-22' } }, 79 | }), 80 | ); 81 | } 82 | 83 | async types( 84 | input: TuneServiceTypesInput, 85 | opts?: Options, 86 | ): Promise<TuneServiceTypesOutput> { 87 | return clientErrorWrapper( 88 | this._client.GET('/v2/tuning_types', { 89 | ...opts, 90 | params: { query: { version: '2024-01-30' } }, 91 | }), 92 | ); 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /src/services/SystemPromptService.ts: -------------------------------------------------------------------------------- 1 | import { Options } from '../client.js'; 2 | import { 3 | SystemPromptServiceCreateInput, 4 | SystemPromptServiceCreateOutput, 5 | SystemPromptServiceDeleteInput, 6 | SystemPromptServiceDeleteOutput, 7 | SystemPromptServiceListInput, 8 | SystemPromptServiceListOutput, 9 | SystemPromptServiceRetrieveInput, 10 | SystemPromptServiceRetrieveOutput, 11 | SystemPromptServiceUpdateInput, 12 | SystemPromptServiceUpdateOutput, 13 | } from '../schema.js'; 14 | import { clientErrorWrapper } from '../utils/errors.js'; 15 | 16 | import { BaseService } from './BaseService.js'; 17 | 18 | export class SystemPromptService extends BaseService { 19 | async create( 20 | input: SystemPromptServiceCreateInput, 21 | opts?: Options, 22 | ): Promise<SystemPromptServiceCreateOutput> { 23 | return clientErrorWrapper( 24 | this._client.POST('/v2/system_prompts', { 25 | ...opts, 26 | body: input, 27 | params: { 28 | query: { 29 | version: '2023-11-22', 30 | }, 31 | }, 32 | }), 33 | ); 34 | } 35 | 36 | async retrieve( 37 | input: SystemPromptServiceRetrieveInput, 38 | opts?: Options, 39 | ): Promise<SystemPromptServiceRetrieveOutput> { 40 | return clientErrorWrapper( 41 | this._client.GET('/v2/system_prompts/{id}', { 42 | ...opts, 43 | params: { 44 | query: { 45 | version: '2023-11-22', 46 | }, 47 | path: input, 48 | }, 49 | }), 50 | ); 51 | } 52 | 53 | async update( 54 | input: SystemPromptServiceUpdateInput, 55 | opts?: Options, 56 | ): Promise<SystemPromptServiceUpdateOutput> { 57 | const { id, ...body } = input; 58 | return clientErrorWrapper( 59 | this._client.PUT('/v2/system_prompts/{id}', { 60 | ...opts, 61 | params: { 62 | query: { 63 | version: '2023-11-22', 64 | }, 65 | path: { id }, 66 | }, 67 | body, 68 | }), 69 | ); 70 | } 71 | 72 | async delete( 73 | input: SystemPromptServiceDeleteInput, 74 | opts?: Options, 75 | ): Promise<SystemPromptServiceDeleteOutput> { 76 | return clientErrorWrapper( 77 | this._client.DELETE('/v2/system_prompts/{id}', { 78 | ...opts, 79 | params: { 80 | query: { 81 | version: '2023-11-22', 82 | }, 83 | path: input, 84 | }, 85 | }), 86 | ); 87 | } 88 | 89 | async list( 90 | input: SystemPromptServiceListInput, 91 | opts?: Options, 92 | ): Promise<SystemPromptServiceListOutput> { 93 | return clientErrorWrapper( 94 | this._client.GET('/v2/system_prompts', { 95 | ...opts, 96 | params: { 97 | query: { 98 | ...input, 99 | version: '2023-11-22', 100 | }, 101 | }, 102 | }), 103 | ); 104 | } 105 | } 106 | -------------------------------------------------------------------------------- /src/services/FileService.ts: -------------------------------------------------------------------------------- 1 | import { Options } from '../client.js'; 2 | import { clientErrorWrapper } from '../utils/errors.js'; 3 | import { 4 | FileServiceCreateInput, 5 | FileServiceCreateOutput, 6 | FileServiceDeleteInput, 7 | FileServiceDeleteOutput, 8 | FileServiceListInput, 9 | FileServiceListOutput, 10 | FileServiceReadInput, 11 | FileServiceReadOutput, 12 | FileServiceRetrieveInput, 13 | FileServiceRetrieveOutput, 14 | } from '../schema.js'; 15 | 16 | import { BaseService } from './BaseService.js'; 17 | 18 | export class FileService extends BaseService { 19 |
async create( 20 | input: FileServiceCreateInput, 21 | opts?: Options, 22 | ): Promise<FileServiceCreateOutput> { 23 | return clientErrorWrapper( 24 | this._client.POST('/v2/files', { 25 | ...opts, 26 | body: { ...input, file: input.file.content }, // file is supplied just to avoid typecast 27 | bodySerializer(body) { 28 | const formData = new FormData(); 29 | formData.append('purpose', body.purpose); 30 | formData.append('file', input.file.content, input.file.name); 31 | return formData; 32 | }, 33 | params: { 34 | query: { 35 | version: '2024-05-13', 36 | }, 37 | }, 38 | }), 39 | ); 40 | } 41 | 42 | async retrieve( 43 | input: FileServiceRetrieveInput, 44 | opts?: Options, 45 | ): Promise<FileServiceRetrieveOutput> { 46 | return clientErrorWrapper( 47 | this._client.GET('/v2/files/{id}', { 48 | ...opts, 49 | params: { 50 | path: input, 51 | query: { 52 | version: '2024-05-13', 53 | }, 54 | }, 55 | }), 56 | ); 57 | } 58 | 59 | async read( 60 | input: FileServiceReadInput, 61 | opts?: Options, 62 | ): Promise<FileServiceReadOutput> { 63 | return clientErrorWrapper( 64 | this._client.GET('/v2/files/{id}/content', { 65 | ...opts, 66 | params: { 67 | path: input, 68 | query: { 69 | version: '2023-11-22', 70 | }, 71 | }, 72 | parseAs: 'blob', 73 | }), 74 | ); 75 | } 76 | 77 | async delete( 78 | input: FileServiceDeleteInput, 79 | opts?: Options, 80 | ): Promise<FileServiceDeleteOutput> { 81 | return clientErrorWrapper( 82 | this._client.DELETE('/v2/files/{id}', { 83 | ...opts, 84 | params: { 85 | path: input, 86 | query: { 87 | version: '2023-11-22', 88 | }, 89 | }, 90 | }), 91 | ); 92 | } 93 | 94 | async list( 95 | input: FileServiceListInput, 96 | opts?: Options, 97 | ): Promise<FileServiceListOutput> { 98 | return clientErrorWrapper( 99 | this._client.GET('/v2/files', { 100 | ...opts, 101 | params: { 102 | query: { 103 | ...input, 104 | version: '2024-05-13', 105 | }, 106 | }, 107 | }), 108 | ); 109 | } 110 | } 111 | -------------------------------------------------------------------------------- /src/client.ts: -------------------------------------------------------------------------------- 1 | import fetchRetry from 'fetch-retry'; 2 | import { StatusCodes } from 'http-status-codes'; 3 | 4 | import { InvalidInputError } from './errors.js'; 5 | import { version } from './buildInfo.js'; 6 | import { lookupApiKey, lookupEndpoint } from './helpers/config.js'; 7 | import { createApiClient } from './api/client.js'; 8 | import { createStreamingApiClient } from './api/streaming-client.js'; 9 | import { TextService } from './services/text/TextService.js'; 10 | import { ModelService } from './services/ModelService.js'; 11 | import { PromptService } from './services/PromptService.js'; 12 | import { RequestService } from './services/RequestService.js'; 13 | import { TuneService } from './services/TuneService.js'; 14 | import { UserService } from './services/UserService.js'; 15 | import { FileService } from './services/FileService.js'; 16 | import { SystemPromptService } from './services/SystemPromptService.js'; 17 | 18 | export interface Configuration { 19 | apiKey?: string; 20 | endpoint?: string; 21 | headers?: Headers; 22 | } 23 | 24 | export type Options = { signal?: AbortSignal }; 25 | 26 | export class Client { 27 | public readonly text: TextService; 28 | public readonly model: ModelService; 29 | public readonly request: RequestService; 30 | public readonly prompt: PromptService; 31 | public readonly tune: TuneService; 32 | public readonly user: UserService; 33 | public readonly file: FileService; 34 | public readonly systemPrompt: SystemPromptService; 35 | 36 | constructor(config: Configuration = {}) { 37 | const
endpoint = config.endpoint ?? lookupEndpoint(); 38 | if (!endpoint) { 39 | throw new InvalidInputError('Configuration endpoint is missing!'); 40 | } 41 | 42 | const apiKey = config.apiKey ?? lookupApiKey(); 43 | if (!apiKey) { 44 | throw new InvalidInputError('Configuration API key is missing!'); 45 | } 46 | 47 | const agent = version ? `node-sdk/${version}` : 'node-sdk'; 48 | 49 | const headers = new Headers(config.headers); 50 | headers.set('user-agent', agent); 51 | headers.set('x-request-origin', agent); 52 | headers.set('authorization', `Bearer ${apiKey}`); 53 | 54 | const _client = createApiClient({ 55 | baseUrl: endpoint, 56 | headers, 57 | fetch: fetchRetry(fetch, { 58 | retryOn: [ 59 | StatusCodes.TOO_MANY_REQUESTS, // Retry also when concurrency limits (due to external factors) are hit 60 | StatusCodes.BAD_GATEWAY, 61 | StatusCodes.SERVICE_UNAVAILABLE, 62 | StatusCodes.CONFLICT, 63 | StatusCodes.GATEWAY_TIMEOUT, 64 | StatusCodes.REQUEST_TIMEOUT, 65 | StatusCodes.INTERNAL_SERVER_ERROR, 66 | ], 67 | retryDelay: function (attempt) { 68 | return Math.pow(2, attempt) * 1000; 69 | }, 70 | }), 71 | }); 72 | 73 | const _streamingClient = createStreamingApiClient({ 74 | baseUrl: endpoint, 75 | headers, 76 | }); 77 | 78 | this.text = new TextService(_client, _streamingClient); 79 | this.model = new ModelService(_client, _streamingClient); 80 | this.request = new RequestService(_client, _streamingClient); 81 | this.prompt = new PromptService(_client, _streamingClient); 82 | this.tune = new TuneService(_client, _streamingClient); 83 | this.user = new UserService(_client, _streamingClient); 84 | this.file = new FileService(_client, _streamingClient); 85 | this.systemPrompt = new SystemPromptService(_client, _streamingClient); 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@ibm-generative-ai/node-sdk", 3 | "version": "3.2.4", 4 | "description": "IBM Generative AI Node.js SDK (Tech Preview)", 5 | "keywords": [ 6 | "ai", 7 | "generative", 8 | "genai", 9 | "ibm" 10 | ], 11 | "homepage": "https://bam.res.ibm.com", 12 | "license": "MIT", 13 | "repository": { 14 | "type": "git", 15 | "url": "https://github.com/IBM/ibm-generative-ai-node-sdk.git" 16 | }, 17 | "bugs": { 18 | "url": "https://github.com/IBM/ibm-generative-ai-node-sdk/issues" 19 | }, 20 | "packageManager": "yarn@3.5.0", 21 | "type": "module", 22 | "files": [ 23 | "dist/**/*" 24 | ], 25 | "exports": { 26 | ".": { 27 | "import": { 28 | "types": "./dist/index.d.ts", 29 | "default": "./dist/index.js" 30 | }, 31 | "require": { 32 | "types": "./dist/index.d.cts", 33 | "default": "./dist/index.cjs" 34 | } 35 | }, 36 | "./langchain": { 37 | "import": { 38 | "types": "./dist/langchain/index.d.ts", 39 | "default": "./dist/langchain/index.js" 40 | }, 41 | "require": { 42 | "types": "./dist/langchain/index.d.cts", 43 | "default": "./dist/langchain/index.cjs" 44 | } 45 | } 46 | }, 47 | "typesVersions": { 48 | "*": { 49 | ".": [ 50 | "dist/index.d.ts" 51 | ], 52 | "langchain": [ 53 | "dist/langchain/index.d.ts" 54 | ] 55 | } 56 | }, 57 | "main": "./dist/index.js", 58 | "types": "./dist/index.d.ts", 59 | "sideEffects": false, 60 | "scripts": { 61 | "build": "tsup", 62 | "format": "prettier --write .", 63 | "lint": "eslint .", 64 | "test": "vitest", 65 | "test:coverage": "vitest --coverage", 66 | "prepack": "yarn build && pinst --disable", 67 | "postinstall": "husky install", 68 | 
"postpack": "pinst --enable", 69 | "generate": "./scripts/generate.sh", 70 | "generate:new": "node ./scripts/generate.js", 71 | "examples": "./scripts/examples.sh" 72 | }, 73 | "peerDependencies": { 74 | "@langchain/core": ">=0.1.0" 75 | }, 76 | "peerDependenciesMeta": { 77 | "@langchain/core": { 78 | "optional": true 79 | } 80 | }, 81 | "devDependencies": { 82 | "@commitlint/cli": "^18.0.0", 83 | "@commitlint/config-conventional": "^18.0.0", 84 | "@langchain/core": "^0.1.0", 85 | "@types/lodash": "^4.14.200", 86 | "@types/node": "^20.11.19", 87 | "@typescript-eslint/eslint-plugin": "^6.9.0", 88 | "@typescript-eslint/parser": "^6.9.0", 89 | "@vitest/coverage-c8": "^0.31.2", 90 | "compare-versions": "^6.1.0", 91 | "dotenv-flow": "^4.0.0", 92 | "eslint": "^8.52.0", 93 | "eslint-config-prettier": "^9.0.0", 94 | "eslint-import-resolver-typescript": "^3.6.1", 95 | "eslint-plugin-import": "^2.29.0", 96 | "husky": "^8.0.3", 97 | "jest-extended": "^4.0.2", 98 | "lint-staged": "^15.0.2", 99 | "lodash": "^4.17.21", 100 | "msw": "^1.3.2", 101 | "openapi-typescript": "^6.7.4", 102 | "pinst": "^3.0.0", 103 | "prettier": "^3.0.3", 104 | "ts-node": "^10.9.1", 105 | "tsup": "^8.0.2", 106 | "typescript": "^5.4.3", 107 | "vitest": "^0.34.6" 108 | }, 109 | "dependencies": { 110 | "@ai-zen/node-fetch-event-source": "^2.1.2", 111 | "fetch-retry": "^5.0.6", 112 | "http-status-codes": "^2.3.0", 113 | "openapi-fetch": "^0.8.2", 114 | "p-queue-compat": "1.0.225", 115 | "yaml": "^2.3.3" 116 | }, 117 | "lint-staged": { 118 | "*.{cjs,js,jsx,ts,tsx}": [ 119 | "eslint --fix" 120 | ], 121 | "*.md": [ 122 | "prettier --write" 123 | ] 124 | } 125 | } 126 | -------------------------------------------------------------------------------- /src/api/streaming-client.ts: -------------------------------------------------------------------------------- 1 | import { 2 | EventStreamContentType, 3 | fetchEventSource, 4 | } from '@ai-zen/node-fetch-event-source'; 5 | import { mergeHeaders } from 'openapi-fetch'; 6 | 7 | import { TypedReadable } from '../utils/stream.js'; 8 | import { BaseError, HttpError, InternalError } from '../errors.js'; 9 | import { safeParseJson } from '../helpers/common.js'; 10 | 11 | export interface SteamingApiClient { 12 | stream: (opts: { 13 | url: string; 14 | headers?: Headers; 15 | body?: any; 16 | signal?: AbortSignal; 17 | }) => TypedReadable; 18 | } 19 | 20 | export function createStreamingApiClient(clientOptions: { 21 | baseUrl?: string; 22 | headers?: Headers; 23 | }): SteamingApiClient { 24 | return { 25 | stream: function fetchSSE({ 26 | url, 27 | headers, 28 | body, 29 | signal, 30 | }: Parameters[0]) { 31 | const outputStream = new TypedReadable({ 32 | autoDestroy: true, 33 | objectMode: true, 34 | signal: signal, 35 | }); 36 | 37 | const onClose = () => { 38 | if (outputStream.readable) { 39 | outputStream.push(null); 40 | } 41 | }; 42 | 43 | const delegatedController = new AbortController(); 44 | if (signal) { 45 | signal.addEventListener( 46 | 'abort', 47 | () => { 48 | delegatedController.abort(); 49 | }, 50 | { 51 | once: true, 52 | }, 53 | ); 54 | } 55 | 56 | const onError = (e: unknown) => { 57 | const err = 58 | e instanceof BaseError 59 | ? 
e 60 | : new InternalError('Unexpected error', { cause: e }); 61 | 62 | delegatedController.abort(); 63 | if (outputStream.readable) { 64 | outputStream.emit('error', err); 65 | throw err; 66 | } 67 | onClose(); 68 | }; 69 | fetchEventSource(new URL(url, clientOptions.baseUrl).toString(), { 70 | method: 'POST', 71 | body: JSON.stringify(body), 72 | headers: Object.fromEntries( 73 | mergeHeaders(clientOptions.headers, headers, { 74 | 'Content-Type': 'application/json', 75 | }) as any, // Types are incomplete, support is there in Node 18 https://developer.mozilla.org/en-US/docs/Web/API/Headers 76 | ), 77 | signal: delegatedController.signal, 78 | onclose: onClose, 79 | async onopen(response) { 80 | const contentType = response.headers.get('content-type') || ''; 81 | 82 | if (response.ok && contentType === EventStreamContentType) { 83 | return; 84 | } 85 | 86 | const responseData = contentType.startsWith('application/json') 87 | ? await response.json().catch(() => null) 88 | : await response.text(); 89 | 90 | onError(new HttpError(responseData)); 91 | }, 92 | onmessage(message) { 93 | if (message.event === 'close') { 94 | onClose(); 95 | return; 96 | } 97 | if (message.data === '') { 98 | return; 99 | } 100 | 101 | const result = safeParseJson(message.data); 102 | if (result === null) { 103 | onError( 104 | new InternalError( 105 | `Failed to parse message "${JSON.stringify(message)}"`, 106 | ), 107 | ); 108 | return; 109 | } 110 | if (message.event === 'error') { 111 | onError(new HttpError(result)); 112 | return; 113 | } 114 | 115 | outputStream.push(result); 116 | }, 117 | onerror: onError, 118 | }).catch(() => { 119 | /* Prevent uncaught exception (errors are handled inside the stream) */ 120 | }); 121 | 122 | return outputStream; 123 | }, 124 | }; 125 | } 126 | -------------------------------------------------------------------------------- /tests/e2e/langchain/llm-chat.test.ts: -------------------------------------------------------------------------------- 1 | import { HumanMessage, SystemMessage } from '@langchain/core/messages'; 2 | 3 | import { GenAIChatModel } from '../../../src/langchain/index.js'; 4 | 5 | // Remove once some chat models will be supported in target env 6 | describe('LangChain Chat', () => { 7 | const makeModel = (conversation_id?: string) => 8 | new GenAIChatModel({ 9 | model_id: 'meta-llama/llama-3-70b-instruct', 10 | conversation_id, 11 | configuration: { 12 | endpoint: process.env.ENDPOINT, 13 | apiKey: process.env.API_KEY, 14 | }, 15 | parameters: { 16 | decoding_method: 'greedy', 17 | min_new_tokens: 1, 18 | max_new_tokens: 25, 19 | repetition_penalty: 2, 20 | }, 21 | }); 22 | 23 | const expectIsNonEmptyString = (value?: unknown) => { 24 | expect(value).toBeString(); 25 | expect(value).toBeTruthy(); 26 | }; 27 | 28 | describe('generate', () => { 29 | const SYSTEM_MESSAGE = [ 30 | `You are a reliable English-to-French translation assistant.`, 31 | `Your task is to accurately translate English text into French.`, 32 | `Focus solely on providing the translation without including any additional information or content.`, 33 | ].join(' '); 34 | 35 | test('should handle single question', async () => { 36 | const chat = makeModel(); 37 | 38 | const response = await chat.invoke( 39 | [ 40 | new HumanMessage( 41 | 'What is a good name for a company that makes colorful socks?', 42 | ), 43 | ], 44 | { parameters: { decoding_method: 'sample' } }, 45 | ); 46 | expectIsNonEmptyString(response.content); 47 | }); 48 | 49 | test('should handle a conversation', async () => { 50 
| let chat = makeModel(); 51 | const { generations } = await chat.generate([ 52 | [ 53 | new HumanMessage( 54 | 'What is a good name for a company that makes colorful socks?', 55 | ), 56 | ], 57 | ]); 58 | expectIsNonEmptyString(generations[0][0].text); 59 | expectIsNonEmptyString(generations[0][0].generationInfo?.conversationId); 60 | 61 | chat = makeModel(generations[0][0].generationInfo?.conversationId); 62 | const response = await chat.invoke([ 63 | new HumanMessage( 64 | 'What is a good name for a company that makes colorful socks?', 65 | ), 66 | ]); 67 | expectIsNonEmptyString(response.content); 68 | }, 15_000); 69 | 70 | test('should handle question with additional hint', async () => { 71 | const chat = makeModel(); 72 | 73 | const response = await chat.invoke([ 74 | new SystemMessage(SYSTEM_MESSAGE), 75 | new HumanMessage('I love programming.'), 76 | ]); 77 | expectIsNonEmptyString(response.content); 78 | }); 79 | 80 | test('should handle multiple questions', async () => { 81 | const chat = makeModel(); 82 | 83 | const response = await chat.generate([ 84 | [ 85 | new SystemMessage(SYSTEM_MESSAGE), 86 | new HumanMessage('I love programming.'), 87 | ], 88 | [ 89 | new SystemMessage(SYSTEM_MESSAGE), 90 | new HumanMessage('I love artificial intelligence.'), 91 | ], 92 | ]); 93 | 94 | expect(response).toBeDefined(); 95 | expect(response.generations).toHaveLength(2); 96 | expect(response.generations[0]).toHaveLength(1); 97 | expectIsNonEmptyString(response.generations[0][0].text); 98 | expect(response.generations[1]).toHaveLength(1); 99 | expectIsNonEmptyString(response.generations[1][0].text); 100 | }); 101 | 102 | test('should handle streaming', async () => { 103 | const chat = makeModel(); 104 | 105 | const tokens: string[] = []; 106 | const handleText = vi.fn((token: string) => { 107 | tokens.push(token); 108 | }); 109 | 110 | const outputStream = await chat.stream( 111 | [new HumanMessage('Tell me a joke.')], 112 | { 113 | callbacks: [{ handleText: handleText }], 114 | }, 115 | ); 116 | const contents = []; 117 | for await (const output of outputStream) { 118 | expect(output.content).toBeString(); 119 | contents.push(output.content as string); 120 | } 121 | expect(handleText).toBeCalledTimes(contents.length); 122 | expect(tokens).toStrictEqual(contents); 123 | }); 124 | }); 125 | 126 | it('Serializes', async () => { 127 | const model = makeModel(); 128 | const serialized = model.toJSON(); 129 | const deserialized = await GenAIChatModel.fromJSON( 130 | serialized, 131 | model.client, 132 | ); 133 | expect(deserialized).toBeInstanceOf(GenAIChatModel); 134 | }); 135 | }); 136 | -------------------------------------------------------------------------------- /tests/e2e/langchain/llm.test.ts: -------------------------------------------------------------------------------- 1 | import { PromptTemplate } from '@langchain/core/prompts'; 2 | import { StringOutputParser } from '@langchain/core/output_parsers'; 3 | 4 | import { AbortError } from '../../../src/errors.js'; 5 | import { GenAIModel } from '../../../src/langchain/llm.js'; 6 | import { Client } from '../../../src/client.js'; 7 | 8 | describe('Langchain', () => { 9 | const makeModel = (modelId: string) => 10 | new GenAIModel({ 11 | model_id: modelId, 12 | client: new Client({ 13 | endpoint: process.env.ENDPOINT, 14 | apiKey: process.env.API_KEY, 15 | }), 16 | parameters: { 17 | top_k: 1, 18 | max_new_tokens: 5, 19 | min_new_tokens: 2, 20 | }, 21 | }); 22 | 23 | const expectIsString = (value?: unknown) => { 24 | 
expect(value).toBeString(); 25 | expect(value).toBeTruthy(); 26 | }; 27 | 28 | describe('tokenization', () => { 29 | it('should correctly calculate tokens', async () => { 30 | const client = makeModel('google/flan-ul2'); 31 | const tokensCount = await client.getNumTokens( 32 | 'What is the biggest building on this planet?', 33 | ); 34 | expect(tokensCount).toBePositive(); 35 | }); 36 | }); 37 | 38 | it('Serializes', async () => { 39 | const model = makeModel('google/flan-ul2'); 40 | const serialized = model.toJSON(); 41 | const deserialized = await GenAIModel.fromJSON(serialized, model.client); 42 | expect(deserialized).toBeInstanceOf(GenAIModel); 43 | }); 44 | 45 | describe('generate', () => { 46 | // TODO: enable once we will set default model for the test account 47 | test.skip('should handle empty modelId', async () => { 48 | const client = makeModel('google/flan-ul2'); 49 | 50 | const data = await client.invoke('Who are you?'); 51 | expectIsString(data); 52 | }, 15_000); 53 | 54 | test('should return correct response for a single input', async () => { 55 | const client = makeModel('google/flan-ul2'); 56 | 57 | const data = await client.invoke('Hello, World'); 58 | expectIsString(data); 59 | }, 15_000); 60 | 61 | test('should return correct response for each input', async () => { 62 | const client = makeModel('google/flan-ul2'); 63 | 64 | const inputs = ['Hello, World', 'Hello again']; 65 | 66 | const outputs = await client.generate(inputs); 67 | expect(outputs.generations).toHaveLength(inputs.length); 68 | expect(outputs.llmOutput).toBeDefined(); 69 | expect(outputs.llmOutput?.generated_token_count).toBeGreaterThan(0); 70 | expect(outputs.llmOutput?.input_token_count).toBeGreaterThan(0); 71 | 72 | outputs.generations.forEach(([output]) => { 73 | expect(output.text).toBeTruthy(); 74 | expect(typeof output.text).toBe('string'); 75 | 76 | expect(output.generationInfo).toMatchObject({ 77 | generated_token_count: expect.any(Number), 78 | input_token_count: expect.any(Number), 79 | stop_reason: expect.any(String), 80 | }); 81 | }); 82 | }, 20_000); 83 | 84 | test('should reject with ERR_CANCELED when aborted', async () => { 85 | const model = makeModel('google/flan-ul2'); 86 | 87 | const controller = new AbortController(); 88 | const generatePromise = model.generate(['Hello, World'], { 89 | signal: controller.signal, 90 | }); 91 | 92 | setTimeout(() => { 93 | controller.abort(); 94 | }, 50); 95 | 96 | await expect(generatePromise).rejects.toBeInstanceOf(AbortError); 97 | }); 98 | 99 | test('should reject with ETIMEDOUT when timed out', async () => { 100 | const model = makeModel('google/flan-ul2'); 101 | 102 | await expect( 103 | model.invoke('Hello, World', { timeout: 10 }), 104 | ).rejects.toThrow(); 105 | }); 106 | 107 | test('streaming', async () => { 108 | const client = makeModel('google/flan-t5-xl'); 109 | 110 | const tokens: string[] = []; 111 | const handleText = vi.fn((token: string) => { 112 | tokens.push(token); 113 | }); 114 | 115 | const stream = await client.stream('Tell me a joke.', { 116 | callbacks: [ 117 | { 118 | handleText, 119 | }, 120 | ], 121 | }); 122 | 123 | const outputs = []; 124 | for await (const output of stream) { 125 | outputs.push(output); 126 | } 127 | expect(handleText).toHaveBeenCalledTimes(outputs.length); 128 | expect(tokens.join('')).toStrictEqual(outputs.join('')); 129 | }, 15_000); 130 | }); 131 | 132 | describe('chaining', () => { 133 | const model = makeModel('google/flan-t5-xl'); 134 | 135 | test('chaining', async () => { 136 | const prompt = new 
PromptTemplate({ 137 | template: 'What is a good name for a company that makes {product}?', 138 | inputVariables: ['product'], 139 | }); 140 | const outputParser = new StringOutputParser(); 141 | 142 | const chain = prompt.pipe(model).pipe(outputParser); 143 | const text = await chain.invoke({ product: 'colorful socks' }); 144 | expectIsString(text); 145 | }, 20_000); 146 | }); 147 | }); 148 | -------------------------------------------------------------------------------- /tests/integration/client.test.ts: -------------------------------------------------------------------------------- 1 | import { 2 | MOCK_ENDPOINT, 3 | modelsStore, 4 | tokenizeStore, 5 | tuneMethodsStore, 6 | tunesStore, 7 | historyStore, 8 | } from '../mocks/handlers.js'; 9 | import { Client } from '../../src/client.js'; 10 | 11 | const dummyTune = { 12 | name: 'newTune', 13 | model_id: 'foo', 14 | tuning_type: 'prompt_tuning' as const, 15 | task_id: 'foo', 16 | training_file_ids: [], 17 | }; 18 | 19 | describe('client', () => { 20 | let client: Client; 21 | beforeEach(() => { 22 | client = new Client({ 23 | endpoint: MOCK_ENDPOINT, 24 | apiKey: 'foobar', 25 | }); 26 | }); 27 | 28 | describe('fetch', () => { 29 | test("should hit endpoint when endpoint contains trailing '/'", async () => { 30 | const client = new Client({ 31 | endpoint: MOCK_ENDPOINT + '/', 32 | apiKey: 'foobar', 33 | }); 34 | await expect( 35 | client.text.generation.create({ 36 | model_id: 'bigscience/bloom', 37 | input: 'Hello, World', 38 | }), 39 | ).toResolve(); 40 | }); 41 | }); 42 | 43 | describe('generate', () => { 44 | test('should return single output for a single input', async () => { 45 | const response = await client.text.generation.create({ 46 | model_id: 'bigscience/bloom', 47 | input: 'Hello, World', 48 | }); 49 | expect(response.results).toBeArrayOfSize(1); 50 | }, 15_000); 51 | }); 52 | 53 | describe('tokenize', () => { 54 | test('should return tokenize info', async () => { 55 | await expect( 56 | client.text.tokenization.create({ 57 | input: 'Hello, how are you? 
Are you okay?', 58 | model_id: 'google/flan-t5-xl', 59 | }), 60 | ).resolves.toMatchObject({ results: [tokenizeStore] }); 61 | }); 62 | }); 63 | 64 | describe('chat', () => { 65 | test('should start a conversation', async () => { 66 | await expect( 67 | client.text.chat.create({ 68 | model_id: 'google/flan-t5-xl', 69 | messages: [ 70 | { role: 'system', content: 'foo' }, 71 | { role: 'user', content: 'bar' }, 72 | ], 73 | }), 74 | ).resolves.toHaveProperty('conversation_id'); 75 | }); 76 | 77 | test('should continue an existing conversation', async () => { 78 | await expect( 79 | client.text.chat.create({ 80 | model_id: 'google/flan-t5-xl', 81 | conversation_id: 'foo', 82 | messages: [{ role: 'user', content: 'bar' }], 83 | }), 84 | ).resolves.toHaveProperty('conversation_id', 'foo'); 85 | }); 86 | }); 87 | 88 | describe('models', () => { 89 | test('should return some models', async () => { 90 | const models = await client.model.list({ limit: 100, offset: 0 }); 91 | expect(models.results).not.toBeEmpty(); 92 | }); 93 | 94 | test('should return details for a given model', async () => { 95 | const id = modelsStore[0].id; 96 | const details = await client.model.retrieve({ id }); 97 | expect(details.result).toHaveProperty('id', id); 98 | }); 99 | }); 100 | 101 | describe('tunes', () => { 102 | test('should list all tunes', async () => { 103 | const response = await client.tune.list({}); 104 | expect(response.results).toBeArrayOfSize(tunesStore.length); 105 | response.results.forEach((tune, idx) => { 106 | expect(tune).toHaveProperty('id', tunesStore[idx].id); 107 | }); 108 | }); 109 | 110 | test('should list all tune methods', async () => { 111 | const response = await client.tune.types({}); 112 | expect(response.results).toBeArrayOfSize(tuneMethodsStore.length); 113 | }); 114 | 115 | test('should show details of a tune', async () => { 116 | const { id } = tunesStore[0]; 117 | const response = await client.tune.retrieve({ id }); 118 | expect(response.result).toHaveProperty('id', id); 119 | }); 120 | 121 | test('should download assets of a completed tune', async () => { 122 | const { id } = tunesStore[0]; 123 | const content = await client.tune.read({ id, type: 'vectors' }); 124 | expect(content).toBeDefined(); 125 | }); 126 | 127 | test('should create a tune', async () => { 128 | const response = await client.tune.create(dummyTune); 129 | expect(tunesStore.map(({ id }) => id)).toContainEqual(response.result.id); 130 | }); 131 | 132 | test('should delete a tune', async () => { 133 | const { id } = tunesStore[1]; 134 | await client.tune.delete({ id }); 135 | expect(tunesStore.map(({ id }) => id)).not.toContain(id); 136 | }); 137 | }); 138 | 139 | describe('request', () => { 140 | test('should list all requests from the past', async () => { 141 | const limit = Math.min(2, historyStore.length); 142 | const response = await client.request.list({ offset: 0, limit }); 143 | expect(response.results).toBeArrayOfSize(limit); 144 | }); 145 | }); 146 | 147 | describe('cross method', () => { 148 | test('should not get deleted tune via model', async () => { 149 | const { 150 | result: { id }, 151 | } = await client.tune.create(dummyTune); 152 | await client.model.retrieve({ id }); 153 | await client.tune.delete({ id }); 154 | await expect(client.model.retrieve({ id })).toReject(); 155 | }); 156 | 157 | test('should not get deleted tune in models list', async () => { 158 | const { 159 | result: { id }, 160 | } = await client.tune.create(dummyTune); 161 | const response = await client.model.list({
limit: 100, offset: 0 }); 162 | expect(response.results.map(({ id }) => id)).toContain(id); 163 | await client.tune.delete({ id }); 164 | const laterResponse = await client.model.list({ limit: 100, offset: 0 }); 165 | expect(laterResponse.results.map(({ id }) => id)).not.toContain(id); 166 | }); 167 | 168 | test('should get newly added tune in models list', async () => { 169 | await client.model.list({ limit: 100, offset: 0 }); 170 | const { 171 | result: { id }, 172 | } = await client.tune.create(dummyTune); 173 | const response = await client.model.list({ limit: 100, offset: 0 }); 174 | expect(response.results.map(({ id }) => id)).toContain(id); 175 | }); 176 | }); 177 | }); 178 | -------------------------------------------------------------------------------- /src/helpers/common.ts: -------------------------------------------------------------------------------- 1 | import { callbackify } from 'node:util'; 2 | import { URLSearchParams } from 'node:url'; 3 | import { Readable } from 'node:stream'; 4 | 5 | import { z } from 'zod'; 6 | 7 | import { ErrorCallback, DataCallback, Truthy, Callback } from './types.js'; 8 | 9 | export function isTruthy<T>(value: T): value is Truthy<T> { 10 | return Boolean(value); 11 | } 12 | 13 | export function concatUnique<T>( 14 | ...arrays: Array<Array<T> | undefined | null> 15 | ): T[] { 16 | const merged = arrays.filter(isTruthy).flat(); 17 | return Array.from(new Set(merged).values()); 18 | } 19 | 20 | export function isNotEmptyArray<T>(arr: T[]): arr is [T, ...T[]] { 21 | return Array.isArray(arr) && arr.length > 0; 22 | } 23 | 24 | export async function wait(ms: number) { 25 | await new Promise((resolve) => setTimeout(resolve, ms)); 26 | } 27 | 28 | export type AnyFn = (...args: any[]) => any; 29 | export function isFunction<T extends AnyFn = AnyFn>( 30 | value: unknown, 31 | ): value is T { 32 | return z.function().safeParse(value).success; 33 | } 34 | 35 | export function safeParseJson<T>(value: unknown): T | null { 36 | try { 37 | return JSON.parse(typeof value === 'string' ? value : String(value)); 38 | } catch { 39 | return null; 40 | } 41 | } 42 | 43 | export type Unwrap<T> = T extends Array<infer P> ? P : T; 44 | 45 | export function parseFunctionOverloads<A, B, C extends AnyFn>( 46 | inputOrOptionsOrCallback?: A | B | C, 47 | optionsOrCallback?: B | C, 48 | callback?: C, 49 | ): { 50 | input?: Exclude<A, AnyFn>; 51 | options?: Exclude<B, AnyFn>; 52 | callback?: C; 53 | } { 54 | if (isFunction(inputOrOptionsOrCallback)) { 55 | return { callback: inputOrOptionsOrCallback }; 56 | } 57 | 58 | const input = inputOrOptionsOrCallback as A; 59 | const options = isFunction(optionsOrCallback) ? undefined : optionsOrCallback; 60 | const cb = isFunction(optionsOrCallback) ?
optionsOrCallback : callback; 61 | 62 | return { 63 | input: input as Exclude<A, AnyFn>, 64 | options: options as Exclude<B, AnyFn>, 65 | callback: cb as C, 66 | }; 67 | } 68 | 69 | export function handle<A, B, C extends AnyFn, R>( 70 | params: { 71 | inputOrOptionsOrCallback?: A | B | C; 72 | optionsOrCallback?: B | C; 73 | callback?: C; 74 | }, 75 | executor: (params: { 76 | input?: Exclude<A, AnyFn>; 77 | options?: Exclude<B, AnyFn>; 78 | }) => Promise<R>, 79 | ) { 80 | const { input, options, callback } = parseFunctionOverloads( 81 | params.inputOrOptionsOrCallback, 82 | params.optionsOrCallback, 83 | params.callback, 84 | ); 85 | 86 | const executorWrapper = () => 87 | executor({ 88 | input, 89 | options, 90 | }); 91 | 92 | if (callback) { 93 | return callbackify(executorWrapper)(callback); 94 | } 95 | return executorWrapper(); 96 | } 97 | 98 | export function handleGenerator<A, B, C extends AnyFn, R>( 99 | params: { 100 | inputOrOptionsOrCallback?: A | B | C; 101 | optionsOrCallback?: B | C; 102 | callback?: C; 103 | }, 104 | executor: (params: { 105 | input?: Exclude<A, AnyFn>; 106 | options?: Exclude<B, AnyFn>; 107 | }) => AsyncGenerator<R>, 108 | ) { 109 | const { input, options, callback } = parseFunctionOverloads( 110 | params.inputOrOptionsOrCallback, 111 | params.optionsOrCallback, 112 | params.callback, 113 | ); 114 | 115 | const executorWrapper = () => 116 | executor({ 117 | input, 118 | options, 119 | }); 120 | 121 | if (callback) { 122 | return callbackifyGenerator(executorWrapper)(callback); 123 | } 124 | return executorWrapper(); 125 | } 126 | 127 | export function isTypeOf<T>( 128 | value: unknown | undefined, 129 | result: boolean, 130 | ): value is T { 131 | return result; 132 | } 133 | 134 | export function isNullish<T>( 135 | value: T | null | undefined, 136 | ): value is null | undefined { 137 | return value === null || value === undefined; 138 | } 139 | 140 | export function callbackifyGenerator<T>(generatorFn: () => AsyncGenerator<T>) { 141 | return (callback: AnyFn) => { 142 | (async () => { 143 | try { 144 | for await (const result of generatorFn()) { 145 | callback(null, result); 146 | } 147 | } catch (err) { 148 | callback(err); 149 | } 150 | })(); 151 | }; 152 | } 153 | 154 | export function callbackifyStream<T>(stream: Readable) { 155 | return (callbackFn: Callback<T | null>) => { 156 | stream.on('data', (data) => callbackFn(null, data)); 157 | stream.on('error', (err) => (callbackFn as ErrorCallback)(err)); 158 | stream.on('finish', () => 159 | (callbackFn as DataCallback<T | null>)(null, null), 160 | ); 161 | }; 162 | } 163 | 164 | export function callbackifyPromise<T>(promise: Promise<T>) { 165 | return (callbackFn: Callback<T>) => { 166 | promise.then( 167 | (data) => callbackFn(null, data), 168 | (err) => (callbackFn as ErrorCallback)(err), 169 | ); 170 | }; 171 | } 172 | 173 | export async function* paginator<T>( 174 | executor: (searchParams: URLSearchParams) => Promise<{ 175 | results: T[]; 176 | totalCount: number; 177 | }>, 178 | { 179 | offset = 0, 180 | count = Infinity, 181 | params, 182 | limit = 100, 183 | }: { 184 | offset?: number; 185 | count?: number; 186 | params?: URLSearchParams; 187 | limit?: number; 188 | }, 189 | ): AsyncGenerator<T> { 190 | let currentOffset = offset; 191 | let remainingCount = count; 192 | let totalCount = Infinity; 193 | while (currentOffset < totalCount) { 194 | const paginatedSearchParams = new URLSearchParams(params); 195 | paginatedSearchParams.set('offset', currentOffset.toString()); 196 | paginatedSearchParams.set( 197 | 'limit', 198 | Math.min(remainingCount, limit).toString(), 199 | ); 200 | const output = await executor(paginatedSearchParams); 201 | for (const result
of output.results) { 202 | yield result; 203 | if (--remainingCount === 0) return; 204 | ++currentOffset; 205 | } 206 | totalCount = output.totalCount; 207 | } 208 | } 209 | 210 | export function isEmptyObject<T extends Record<string, unknown>>( 211 | obj: T, 212 | ): obj is Record<string, never> { 213 | for (const key in obj) { 214 | if (Object.prototype.hasOwnProperty.call(obj, key)) { 215 | return false; 216 | } 217 | } 218 | return true; 219 | } 220 | 221 | export async function asyncGeneratorToArray<T, L>( 222 | generator: AsyncGenerator<T, L>, 223 | ) { 224 | const response = { 225 | chunks: [] as T[], 226 | output: undefined as L, 227 | }; 228 | 229 | // eslint-disable-next-line no-constant-condition 230 | while (true) { 231 | const { done, value } = await generator.next(); 232 | if (done) { 233 | response.output = value; 234 | break; 235 | } 236 | 237 | response.chunks.push(value); 238 | } 239 | 240 | return response; 241 | } 242 | -------------------------------------------------------------------------------- /src/langchain/llm.ts: -------------------------------------------------------------------------------- 1 | import { 2 | BaseLLM, 3 | BaseLLMCallOptions, 4 | BaseLLMParams, 5 | } from '@langchain/core/language_models/llms'; 6 | import { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager'; 7 | import type { LLMResult } from '@langchain/core/outputs'; 8 | import { GenerationChunk } from '@langchain/core/outputs'; 9 | import merge from 'lodash/merge.js'; 10 | import { load } from '@langchain/core/load'; 11 | import type { Serialized } from '@langchain/core/load/serializable'; 12 | 13 | import { Client, Configuration } from '../client.js'; 14 | import { concatUnique, isNullish } from '../helpers/common.js'; 15 | import { 16 | TextGenerationCreateInput, 17 | TextGenerationCreateStreamInput, 18 | } from '../schema.js'; 19 | 20 | type TextGenerationInput = TextGenerationCreateInput & 21 | TextGenerationCreateStreamInput; 22 | 23 | export type GenAIModelParams = BaseLLMParams & 24 | Pick< 25 | TextGenerationInput, 26 | 'model_id' | 'prompt_id' | 'parameters' | 'moderations' 27 | > & { 28 | model_id: NonNullable<TextGenerationInput['model_id']>; 29 | } & ( 30 | | { client: Client; configuration?: never } 31 | | { client?: never; configuration: Configuration } 32 | ); 33 | export type GenAIModelOptions = BaseLLMCallOptions & 34 | Partial<Omit<GenAIModelParams, 'client' | 'configuration'>>; 35 | 36 | export class GenAIModel extends BaseLLM<GenAIModelOptions> { 37 | public readonly client: Client; 38 | 39 | public readonly modelId: GenAIModelParams['model_id']; 40 | public readonly promptId: GenAIModelParams['prompt_id']; 41 | public readonly parameters: GenAIModelParams['parameters']; 42 | public readonly moderations: GenAIModelParams['moderations']; 43 | 44 | constructor({ 45 | model_id, 46 | prompt_id, 47 | parameters, 48 | moderations, 49 | client, 50 | configuration, 51 | ...options 52 | }: GenAIModelParams) { 53 | super(options); 54 | 55 | this.modelId = model_id; 56 | this.promptId = prompt_id; 57 | this.parameters = parameters; 58 | this.moderations = moderations; 59 | this.client = client ??
new Client(configuration); 60 | } 61 | 62 | async _generate( 63 | inputs: string[], 64 | options: this['ParsedCallOptions'], 65 | runManager?: CallbackManagerForLLMRun, 66 | ): Promise<LLMResult> { 67 | const outputs = await Promise.all( 68 | inputs.map((input) => 69 | this.client.text.generation.create( 70 | this._prepareRequest(input, options), 71 | { 72 | signal: options.signal, 73 | }, 74 | ), 75 | ), 76 | ); 77 | 78 | const generations = outputs.map((output) => 79 | output.results.map((result) => { 80 | const { generated_text, ...generationInfo } = result; 81 | return { text: generated_text, generationInfo }; 82 | }), 83 | ); 84 | 85 | const llmOutput = generations.flat().reduce( 86 | (acc, generation) => { 87 | acc.generated_token_count += 88 | generation.generationInfo.generated_token_count; 89 | acc.input_token_count += 90 | generation.generationInfo.input_token_count ?? 0; 91 | return acc; 92 | }, 93 | { 94 | generated_token_count: 0, 95 | input_token_count: 0, 96 | }, 97 | ); 98 | 99 | return { 100 | generations, 101 | llmOutput, 102 | }; 103 | } 104 | 105 | async *_streamResponseChunks( 106 | input: string, 107 | options: this['ParsedCallOptions'], 108 | runManager?: CallbackManagerForLLMRun, 109 | ): AsyncGenerator<GenerationChunk> { 110 | const stream = await this.client.text.generation.create_stream( 111 | this._prepareRequest(input, options), 112 | { 113 | signal: options.signal, 114 | }, 115 | ); 116 | 117 | for await (const response of stream) { 118 | if (response.results) { 119 | for (const { generated_text, ...generationInfo } of response.results) { 120 | yield new GenerationChunk({ 121 | text: generated_text, 122 | generationInfo, 123 | }); 124 | void runManager?.handleText(generated_text); 125 | } 126 | } 127 | if (response.moderations) { 128 | yield new GenerationChunk({ 129 | text: '', 130 | generationInfo: { 131 | moderations: response.moderations, 132 | }, 133 | }); 134 | void runManager?.handleText(''); 135 | } 136 | } 137 | } 138 | 139 | private _prepareRequest( 140 | input: string, 141 | options: this['ParsedCallOptions'], 142 | ): TextGenerationInput { 143 | const stop_sequences = concatUnique( 144 | options.stop, 145 | options.parameters?.stop_sequences, 146 | ); 147 | const { model_id, prompt_id, ...rest } = merge( 148 | { 149 | model_id: this.modelId, 150 | prompt_id: this.promptId, 151 | moderations: this.moderations, 152 | parameters: this.parameters, 153 | }, 154 | { 155 | model_id: options.model_id, 156 | prompt_id: options.prompt_id, 157 | moderations: options.moderations, 158 | parameters: { 159 | ...options.parameters, 160 | stop_sequences, 161 | }, 162 | }, 163 | { input }, 164 | ); 165 | 166 | return { 167 | ...(prompt_id ? { prompt_id } : { model_id }), 168 | ...rest, 169 | }; 170 | } 171 | 172 | async getNumTokens(input: string): Promise<number> { 173 | const result = await this.client.text.tokenization.create({ 174 | ...(!isNullish(this.modelId) && { 175 | model_id: this.modelId, 176 | }), 177 | input, 178 | parameters: { 179 | return_options: { 180 | tokens: false, 181 | }, 182 | }, 183 | }); 184 | 185 | return result.results.at(0)?.token_count ?? 0; 186 | } 187 | 188 | static async fromJSON(value: string | Serialized, client?: Client) { 189 | const input = typeof value === 'string' ?
value : JSON.stringify(value); 190 | return await load(input, { 191 | optionalImportsMap: { 192 | '@ibm-generative-ai/node-sdk/langchain/llm': { 193 | GenAIModel: GenAIModel, 194 | }, 195 | }, 196 | secretsMap: { 197 | client, 198 | }, 199 | }); 200 | } 201 | 202 | _modelType(): string { 203 | return this.modelId; 204 | } 205 | 206 | _llmType(): string { 207 | return 'GenAIModel'; 208 | } 209 | 210 | lc_serializable = true; 211 | lc_namespace = ['@ibm-generative-ai/node-sdk', 'langchain', 'llm']; 212 | 213 | get lc_id(): string[] { 214 | return [...this.lc_namespace, 'GenAIModel']; 215 | } 216 | 217 | lc_kwargs = { 218 | modelId: undefined, 219 | promptId: undefined, 220 | parameters: undefined, 221 | moderations: undefined, 222 | client: undefined, 223 | }; 224 | 225 | get lc_secrets() { 226 | return { ...super.lc_secrets, client: 'client' }; 227 | } 228 | } 229 | -------------------------------------------------------------------------------- /tests/e2e/client.test.ts: -------------------------------------------------------------------------------- 1 | import range from 'lodash/range.js'; 2 | import { AbortError } from 'p-queue-compat'; 3 | 4 | import { Client } from '../../src/client.js'; 5 | import { HttpError } from '../../src/errors.js'; 6 | import { 7 | TextChatCreateStreamOutput, 8 | TextGenerationCreateStreamOutput, 9 | } from '../../src/schema.js'; 10 | import { isAbortError } from '../../src/utils/errors.js'; 11 | 12 | describe('client', () => { 13 | let client: Client; 14 | beforeAll(() => { 15 | client = new Client({ 16 | endpoint: process.env.ENDPOINT, 17 | apiKey: process.env.API_KEY, 18 | }); 19 | }); 20 | 21 | describe('generate', () => { 22 | test('should handle concurrency limits', async () => { 23 | const inputs = [...Array(20).keys()].map(() => ({ 24 | model_id: 'google/flan-ul2', 25 | input: 'Hello, World', 26 | })); 27 | 28 | const requests = inputs.map((input) => 29 | client.text.generation.create(input), 30 | ); 31 | 32 | expect.assertions(requests.length); 33 | for (const request of requests) { 34 | await expect(request).toResolve(); 35 | } 36 | }, 200_000); 37 | 38 | describe('streaming', () => { 39 | const makeValidStream = (input: Record<string, any> = {}) => 40 | client.text.generation.create_stream({ 41 | model_id: 'google/flan-ul2', 42 | input: 'Hello, World', 43 | parameters: { 44 | max_new_tokens: 10, 45 | ...input.parameters, 46 | }, 47 | moderations: input.moderations, 48 | }); 49 | 50 | const validateStreamChunk = (chunk: TextGenerationCreateStreamOutput) => { 51 | const isNumberOrNull = (value: unknown) => 52 | value === null || !Number.isNaN(value); 53 | 54 | chunk.results?.forEach((result) => { 55 | expect(result).toBeObject(); 56 | expect(result.generated_token_count).not.toBeNegative(); 57 | expect(result.input_token_count).not.toBeNegative(); 58 | expect(result.stop_reason).toSatisfy(isNumberOrNull); 59 | }); 60 | expect(chunk.moderations).toBeOneOf([ 61 | undefined, 62 | expect.objectContaining({ hap: expect.any(Array) }), 63 | ]); 64 | }; 65 | 66 | // TODO remove skip after server bug is fixed or when schema is updated 67 | test.skip('should correctly process moderation chunks during streaming', async () => { 68 | const stream = await makeValidStream({ 69 | min_new_tokens: 1, 70 | max_new_tokens: 5, 71 | moderations: { 72 | hap: { 73 | input: true, 74 | threshold: 0.01, 75 | }, 76 | }, 77 | }); 78 | 79 | for await (const chunk of stream) { 80 | validateStreamChunk(chunk); 81 | if (chunk.moderations) { 82 | return; 83 | } 84 | } 85 | throw Error('No
moderation chunks have been retrieved from the API'); 86 | }); 87 | 88 | test('should return valid stream for a single input', async () => { 89 | const stream = await makeValidStream(); 90 | 91 | const chunks = await new Promise<TextGenerationCreateStreamOutput[]>( 92 | (resolve, reject) => { 93 | const chunks: TextGenerationCreateStreamOutput[] = []; 94 | stream.on('data', (chunk) => { 95 | validateStreamChunk(chunk); 96 | chunks.push(chunk); 97 | }); 98 | stream.on('close', () => { 99 | resolve(chunks); 100 | }); 101 | stream.on('error', (err) => { 102 | reject(err); 103 | }); 104 | }, 105 | ); 106 | 107 | expect(chunks.length).toBeGreaterThan(0); 108 | }, 15_000); 109 | 110 | test('should handle errors', async () => { 111 | const stream = await client.text.generation.create_stream({ 112 | model_id: 'XXX/XXX', 113 | input: 'Hello, World', 114 | }); 115 | 116 | await expect( 117 | new Promise((_, reject) => { 118 | stream.on('error', reject); 119 | }), 120 | ).rejects.toThrow(HttpError); 121 | }, 5_000); 122 | }); 123 | }); 124 | 125 | describe('chat', () => { 126 | describe('streaming', () => { 127 | const makeValidStream = () => 128 | client.text.chat.create_stream({ 129 | model_id: 'google/flan-ul2', 130 | messages: [{ role: 'user', content: 'Hello World!' }], 131 | }); 132 | 133 | const validateStreamChunk = (chunk: TextChatCreateStreamOutput) => { 134 | expect(chunk).toBeObject(); 135 | expect(chunk).toHaveProperty('conversation_id'); 136 | expect(chunk).toHaveProperty('results'); 137 | }; 138 | 139 | test('should return valid stream', async () => { 140 | const stream = await makeValidStream(); 141 | 142 | const chunks: TextChatCreateStreamOutput[] = []; 143 | for await (const chunk of stream) { 144 | validateStreamChunk(chunk); 145 | chunks.push(chunk); 146 | } 147 | 148 | expect(chunks.length).toBeGreaterThan(0); 149 | }, 15_000); 150 | 151 | test('should handle errors', async () => { 152 | const stream = await client.text.chat.create_stream({ 153 | model_id: 'XXX/XXX', 154 | messages: [{ role: 'user', content: 'Hello World!'
}], 155 | }); 156 | 157 | await expect( 158 | new Promise((_, reject) => { 159 | stream.on('error', reject); 160 | }), 161 | ).rejects.toThrow(HttpError); 162 | }, 5_000); 163 | }); 164 | }); 165 | 166 | describe('error handling', () => { 167 | test('should reject with extended error for invalid model', async () => { 168 | await expect( 169 | client.text.generation.create({ 170 | model_id: 'invalid-model', 171 | input: 'Hello, World', 172 | }), 173 | ).rejects.toThrow(HttpError); 174 | }); 175 | 176 | test('should reject with ERR_CANCELED when aborted', async () => { 177 | const controller = new AbortController(); 178 | 179 | const generatePromise = client.text.generation.create( 180 | { 181 | model_id: 'google/flan-ul2', 182 | input: 'Hello, World', 183 | }, 184 | { signal: controller.signal }, 185 | ); 186 | 187 | setTimeout(() => { 188 | controller.abort(); 189 | }, 50); 190 | 191 | await expect(generatePromise).rejects.toBeInstanceOf(AbortError); 192 | }); 193 | 194 | test('should reject with ABORT_ERR when aborted (stream)', async () => { 195 | const controller = new AbortController(); 196 | setTimeout(() => { 197 | controller.abort(); 198 | }, 50); 199 | 200 | await expect(async () => { 201 | const stream = await client.text.generation.create_stream( 202 | { 203 | model_id: 'google/flan-ul2', 204 | input: 'Hello, World', 205 | }, 206 | { signal: controller.signal }, 207 | ); 208 | await new Promise((resolve, reject) => { 209 | stream.once('finish', resolve); 210 | stream.once('error', reject); 211 | }); 212 | }).rejects.toSatisfy(isAbortError); 213 | }); 214 | }); 215 | 216 | describe('limits', () => { 217 | test('should handle rate limits', async () => { 218 | const promise = Promise.all( 219 | range(0, 50).map(() => client.tune.types({})), 220 | ); 221 | await expect(promise).toResolve(); 222 | }); 223 | }); 224 | }); 225 | -------------------------------------------------------------------------------- /src/langchain/llm-chat.ts: -------------------------------------------------------------------------------- 1 | import { 2 | BaseChatModel, 3 | BaseChatModelParams, 4 | } from '@langchain/core/language_models/chat_models'; 5 | import { 6 | AIMessage, 7 | AIMessageChunk, 8 | BaseMessage, 9 | } from '@langchain/core/messages'; 10 | import { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager'; 11 | import { ChatGenerationChunk, ChatResult } from '@langchain/core/outputs'; 12 | import { BaseLanguageModelCallOptions as BaseChatModelCallOptions } from '@langchain/core/language_models/base'; 13 | import merge from 'lodash/merge.js'; 14 | import { load } from '@langchain/core/load'; 15 | import type { Serialized } from '@langchain/core/load/serializable'; 16 | 17 | import { Client, Configuration } from '../client.js'; 18 | import { TextChatCreateInput, TextChatCreateStreamInput } from '../schema.js'; 19 | import { InternalError, InvalidInputError } from '../errors.js'; 20 | 21 | type TextChatInput = TextChatCreateInput & TextChatCreateStreamInput; 22 | 23 | export type GenAIChatModelParams = BaseChatModelParams & 24 | Omit<TextChatInput, 'messages'> & { 25 | model_id: NonNullable<TextChatInput['model_id']>; 26 | } & ( 27 | | { client: Client; configuration?: never } 28 | | { client?: never; configuration: Configuration } 29 | ); 30 | export type GenAIChatModelOptions = BaseChatModelCallOptions & 31 | Partial<Omit<GenAIChatModelParams, 'client' | 'configuration'>>; 32 | 33 | export class GenAIChatModel extends BaseChatModel<GenAIChatModelOptions> { 34 | public readonly client: Client; 35 | 36 | public readonly modelId: GenAIChatModelParams['model_id']; 37 | public readonly promptId: 
GenAIChatModelParams['prompt_id']; 38 | public readonly conversationId: GenAIChatModelParams['conversation_id']; 39 | public readonly parameters: GenAIChatModelParams['parameters']; 40 | public readonly moderations: GenAIChatModelParams['moderations']; 41 | public readonly useConversationParameters: GenAIChatModelParams['use_conversation_parameters']; 42 | public readonly parentId: GenAIChatModelParams['parent_id']; 43 | public readonly trimMethod: GenAIChatModelParams['trim_method']; 44 | 45 | constructor({ 46 | model_id, 47 | prompt_id, 48 | conversation_id, 49 | parameters, 50 | moderations, 51 | parent_id, 52 | use_conversation_parameters, 53 | trim_method, 54 | client, 55 | configuration, 56 | ...options 57 | }: GenAIChatModelParams) { 58 | super(options); 59 | 60 | this.modelId = model_id; 61 | this.promptId = prompt_id; 62 | this.conversationId = conversation_id; 63 | this.parameters = parameters; 64 | this.moderations = moderations; 65 | this.parentId = parent_id; 66 | this.useConversationParameters = use_conversation_parameters; 67 | this.trimMethod = trim_method; 68 | this.client = client ?? new Client(configuration); 69 | } 70 | 71 | async _generate( 72 | messages: BaseMessage[], 73 | options: this['ParsedCallOptions'], 74 | _runManager?: CallbackManagerForLLMRun, 75 | ): Promise<ChatResult> { 76 | const output = await this.client.text.chat.create( 77 | this._prepareRequest(messages, options), 78 | { signal: options.signal }, 79 | ); 80 | if (output.results.length !== 1) throw new InternalError('Invalid result'); 81 | const result = output.results[0]; 82 | if (result.input_token_count == null) 83 | throw new InternalError('Missing token count'); 84 | return { 85 | generations: [ 86 | { 87 | message: new AIMessage({ content: result.generated_text }), 88 | text: result.generated_text, 89 | generationInfo: { 90 | conversationId: output.conversation_id, 91 | inputTokens: result.input_tokens, 92 | generatedTokens: result.generated_tokens, 93 | seed: result.seed, 94 | stopReason: result.stop_reason, 95 | stopSequence: result.stop_sequence, 96 | moderation: result.moderation, 97 | }, 98 | }, 99 | ], 100 | llmOutput: { 101 | tokenUsage: { 102 | completionTokens: result.generated_token_count, 103 | promptTokens: result.input_token_count, 104 | totalTokens: result.generated_token_count + result.input_token_count, 105 | }, 106 | }, 107 | }; 108 | } 109 | 110 | async *_streamResponseChunks( 111 | messages: BaseMessage[], 112 | options: this['ParsedCallOptions'], 113 | _runManager?: CallbackManagerForLLMRun, 114 | ): AsyncGenerator<ChatGenerationChunk> { 115 | const outputStream = await this.client.text.chat.create_stream( 116 | this._prepareRequest(messages, options), 117 | { signal: options.signal }, 118 | ); 119 | for await (const output of outputStream) { 120 | if (output.results) { 121 | for (const result of output.results) { 122 | yield new ChatGenerationChunk({ 123 | message: new AIMessageChunk({ 124 | content: result.generated_text, 125 | }), 126 | text: result.generated_text, 127 | generationInfo: { 128 | conversationId: output.conversation_id, 129 | inputTokens: result.input_tokens, 130 | generatedTokens: result.generated_tokens, 131 | seed: result.seed, 132 | stopReason: result.stop_reason, 133 | stopSequence: result.stop_sequence, 134 | }, 135 | }); 136 | await _runManager?.handleText(result.generated_text); 137 | } 138 | } 139 | if (output.moderations) { 140 | yield new ChatGenerationChunk({ 141 | message: new AIMessageChunk({ 142 | content: '', 143 | }), 144 | text: '', 145 | generationInfo: { 146 | 
conversationId: output.conversation_id, 147 | moderation: output.moderations, 148 | }, 149 | }); 150 | await _runManager?.handleText(''); 151 | } 152 | } 153 | } 154 | 155 | private _prepareRequest( 156 | messages: BaseMessage[], 157 | options: this['ParsedCallOptions'], 158 | ) { 159 | const { 160 | conversation_id, 161 | model_id, 162 | prompt_id, 163 | use_conversation_parameters, 164 | parameters, 165 | ...rest 166 | } = merge( 167 | { 168 | conversation_id: this.conversationId, 169 | model_id: this.modelId, 170 | prompt_id: this.promptId, 171 | moderations: this.moderations, 172 | parameters: this.parameters, 173 | use_conversation_parameters: this.useConversationParameters, 174 | parent_id: this.parentId, 175 | trim_method: this.trimMethod, 176 | }, 177 | { 178 | conversation_id: options.conversation_id, 179 | model_id: options.model_id, 180 | prompt_id: options.prompt_id, 181 | moderations: options.moderations, 182 | parameters: options.parameters, 183 | use_conversation_parameters: options.use_conversation_parameters, 184 | parent_id: options.parent_id, 185 | trim_method: options.trim_method, 186 | }, 187 | { messages: this._convertMessages(messages) }, 188 | ); 189 | return { 190 | ...(conversation_id 191 | ? { conversation_id } 192 | : prompt_id 193 | ? { prompt_id } 194 | : { model_id }), 195 | ...(use_conversation_parameters 196 | ? { use_conversation_parameters } 197 | : { parameters }), 198 | ...rest, 199 | }; 200 | } 201 | 202 | private _convertMessages( 203 | messages: BaseMessage[], 204 | ): TextChatCreateInput['messages'] & TextChatCreateStreamInput['messages'] { 205 | return messages.map((message) => { 206 | const content = message.content; 207 | if (typeof content !== 'string') 208 | throw new InvalidInputError('Multimodal messages are not supported.'); 209 | const type = message._getType(); 210 | switch (type) { 211 | case 'system': 212 | return { content, role: 'system' }; 213 | case 'human': 214 | return { content, role: 'user' }; 215 | case 'ai': 216 | return { content, role: 'assistant' }; 217 | default: 218 | throw new InvalidInputError(`Unsupported message type "${type}"`); 219 | } 220 | }); 221 | } 222 | 223 | lc_serializable = true; 224 | lc_namespace = ['@ibm-generative-ai/node-sdk', 'langchain', 'llm-chat']; 225 | 226 | get lc_id(): string[] { 227 | return [...this.lc_namespace, 'GenAIChatModel']; 228 | } 229 | 230 | lc_kwargs = { 231 | modelId: undefined, 232 | promptId: undefined, 233 | conversationId: undefined, 234 | parameters: undefined, 235 | moderations: undefined, 236 | useConversationParameters: undefined, 237 | parentId: undefined, 238 | trimMethod: undefined, 239 | client: undefined, 240 | }; 241 | 242 | get lc_secrets() { 243 | return { ...super.lc_secrets, client: 'client' }; 244 | } 245 | 246 | static async fromJSON(value: string | Serialized, client?: Client) { 247 | const input = typeof value !== 'string' ? 
JSON.stringify(value) : value; 248 | return await load(input, { 249 | optionalImportsMap: { 250 | '@ibm-generative-ai/node-sdk/langchain/llm-chat': { 251 | GenAIChatModel: GenAIChatModel, 252 | }, 253 | }, 254 | secretsMap: { 255 | client, 256 | }, 257 | }); 258 | } 259 | 260 | _modelType(): string { 261 | return this.modelId; 262 | } 263 | 264 | _llmType(): string { 265 | return 'GenAIChatModel'; 266 | } 267 | } 268 | -------------------------------------------------------------------------------- /tests/mocks/handlers.ts: -------------------------------------------------------------------------------- 1 | import { randomUUID } from 'node:crypto'; 2 | 3 | import { DefaultBodyType, MockedRequest, RestHandler, rest } from 'msw'; 4 | import _ from 'lodash'; 5 | 6 | export const MOCK_ENDPOINT = 'https://mock'; 7 | 8 | export let generateConfigStore: Record<string, any>; 9 | export const resetGenerateConfigStore = () => { 10 | generateConfigStore = { 11 | model_id: 'foobar', 12 | }; 13 | }; 14 | 15 | export const generateStore = { 16 | generated_text: 'foobar', 17 | generated_token_count: 1, 18 | input_token_count: 5, 19 | stop_reason: 'TOKEN_LIMIT', 20 | }; 21 | 22 | export const tokenizeStore = { 23 | token_count: 2, 24 | tokens: ['foo', 'bar'], 25 | }; 26 | 27 | export const modelsStore = [ 28 | { 29 | id: 'foo/model', 30 | name: 'Foo model', 31 | size: '31B', 32 | token_limit: 123, 33 | tags: [], 34 | source_model_id: null, 35 | tasks: [], 36 | model_family: { 37 | id: 1, 38 | name: 'foobar', 39 | }, 40 | schema_generate: { id: 1, value: 'generate schema placeholder' }, 41 | schema_tokenize: { id: 1, value: 'tokenize schema placeholder' }, 42 | }, 43 | { 44 | id: 'bar/model', 45 | name: 'Bar model', 46 | size: '15B', 47 | token_limit: 456, 48 | tags: [], 49 | source_model_id: null, 50 | tasks: [], 51 | model_family: { 52 | id: 1, 53 | name: 'foobar', 54 | }, 55 | schema_generate: { id: 1, value: 'generate schema placeholder' }, 56 | schema_tokenize: { id: 1, value: 'tokenize schema placeholder' }, 57 | }, 58 | ]; 59 | 60 | export let tunesStore: any[]; 61 | export const resetTunesStore = () => { 62 | tunesStore = [ 63 | { 64 | id: 'foo', 65 | status: 'COMPLETED', 66 | assets: { 67 | vectors: 'encoderContent', 68 | logs: 'logsContent', 69 | }, 70 | }, 71 | { 72 | id: 'deleteme', 73 | status: 'PENDING', 74 | }, 75 | ]; 76 | }; 77 | 78 | export const tuneMethodsStore = [ 79 | { id: 'foo', name: 'Foo' }, 80 | { id: 'bar', name: 'Bar' }, 81 | ]; 82 | 83 | export let historyStore: any[]; 84 | export const resetHistoryStore = () => { 85 | historyStore = Array(2) 86 | .fill(null) 87 | .map((_, index) => ({ 88 | id: String(index + 1), 89 | duration: 431, 90 | request: { 91 | inputs: ['XXX'], 92 | model_id: 'aaa/bbb', 93 | parameters: { 94 | temperature: 0, 95 | max_new_tokens: 1, 96 | }, 97 | }, 98 | status: 'SUCCESS', 99 | created_at: '2022-12-19T22:53:22.000Z', 100 | response: { 101 | results: [ 102 | { 103 | generated_text: 'YYY', 104 | generated_token_count: 1, 105 | input_token_count: 2, 106 | stop_reason: 'MAX_TOKENS', 107 | }, 108 | ], 109 | model_id: 'aaa/bbb', 110 | created_at: '2022-12-19T22:53:22.358Z', 111 | }, 112 | })); 113 | }; 114 | 115 | export const chatStore = new Map(); 116 | export const resetChatStore = () => { 117 | chatStore.clear(); 118 | chatStore.set(randomUUID(), [ 119 | { role: 'system', content: 'instruction' }, 120 | { role: 'user', content: 'hello' }, 121 | { role: 'assistant', content: 'hi' }, 122 | ]); 123 | }; 124 | 125 | export const resetStores = () => { 126 | 
resetGenerateConfigStore(); 127 | resetTunesStore(); 128 | resetHistoryStore(); 129 | resetChatStore(); 130 | }; 131 | resetStores(); 132 | 133 | export const handlers: RestHandler<MockedRequest<DefaultBodyType>>[] = [ 134 | // Generate Limits 135 | rest.get(`${MOCK_ENDPOINT}/v1/generate/limits`, (req, res, ctx) => 136 | res( 137 | ctx.status(200), 138 | ctx.json({ 139 | tokenCapacity: 100, 140 | tokensUsed: 0, 141 | }), 142 | ), 143 | ), 144 | 145 | // Generate 146 | rest.post(`${MOCK_ENDPOINT}/v2/text/generation`, async (req, res, ctx) => { 147 | const body = await req.json(); 148 | const inputs = Array.isArray(body.input) ? body.input : [body.input]; 149 | return res( 150 | ctx.status(200), 151 | ctx.json({ 152 | results: new Array(inputs.length).fill(generateStore), 153 | }), 154 | ); 155 | }), 156 | 157 | rest.get( 158 | `${MOCK_ENDPOINT}/v2/text/generation/limits`, 159 | async (req, res, ctx) => 160 | res( 161 | ctx.status(200), 162 | ctx.json({ 163 | result: { 164 | concurrency: { 165 | limit: 100, 166 | remaining: 100, 167 | }, 168 | }, 169 | }), 170 | ), 171 | ), 172 | 173 | // Tokenize 174 | rest.post(`${MOCK_ENDPOINT}/v2/text/tokenization`, async (req, res, ctx) => 175 | res( 176 | ctx.status(200), 177 | ctx.json({ 178 | results: [tokenizeStore], 179 | }), 180 | ), 181 | ), 182 | 183 | // Models 184 | rest.get(`${MOCK_ENDPOINT}/v2/models`, async (req, res, ctx) => 185 | res( 186 | ctx.status(200), 187 | ctx.json({ 188 | results: [...modelsStore, ...tunesStore].map( 189 | ({ id, name, size, token_limit }) => ({ 190 | id, 191 | name, 192 | size, 193 | token_limit, 194 | }), 195 | ), 196 | }), 197 | ), 198 | ), 199 | rest.get(`${MOCK_ENDPOINT}/v2/models/:id`, async (req, res, ctx) => { 200 | const model = [...modelsStore, ...tunesStore].find( 201 | (model) => model.id === req.params.id, 202 | ); 203 | if (!model) { 204 | return res(ctx.status(404)); 205 | } 206 | return res( 207 | ctx.status(200), 208 | ctx.json({ 209 | result: model, 210 | }), 211 | ); 212 | }), 213 | 214 | // Tunes 215 | rest.get(`${MOCK_ENDPOINT}/v2/tuning_types`, async (req, res, ctx) => 216 | res( 217 | ctx.status(200), 218 | ctx.json({ 219 | results: tuneMethodsStore, 220 | }), 221 | ), 222 | ), 223 | rest.get(`${MOCK_ENDPOINT}/v2/tunes`, async (req, res, ctx) => { 224 | const offset = parseInt(req.url.searchParams.get('offset') ?? '0'); 225 | const limit = parseInt(req.url.searchParams.get('limit') ?? 
'100'); 226 | const tunes = tunesStore.slice(offset, limit); 227 | return res( 228 | ctx.status(200), 229 | ctx.json({ 230 | results: tunes, 231 | total_count: tunes.length, 232 | }), 233 | ); 234 | }), 235 | rest.post(`${MOCK_ENDPOINT}/v2/tunes`, async (req, res, ctx) => { 236 | const body = await req.json(); 237 | const newTune = { ...body, id: randomUUID() }; 238 | tunesStore.push(newTune); 239 | return res( 240 | ctx.status(200), 241 | ctx.json({ 242 | result: newTune, 243 | }), 244 | ); 245 | }), 246 | rest.get(`${MOCK_ENDPOINT}/v2/tunes/:id`, async (req, res, ctx) => { 247 | const tune = tunesStore.find((tune: any) => tune.id === req.params.id); 248 | if (!tune) { 249 | return res(ctx.status(404)); 250 | } 251 | return res( 252 | ctx.status(200), 253 | ctx.json({ 254 | result: tune, 255 | }), 256 | ); 257 | }), 258 | rest.delete(`${MOCK_ENDPOINT}/v2/tunes/:id`, async (req, res, ctx) => { 259 | const tunesCount = tunesStore.length; 260 | tunesStore = tunesStore.filter((tune: any) => tune.id !== req.params.id); 261 | if (tunesCount === tunesStore.length) { 262 | return res(ctx.status(404)); 263 | } 264 | return res(ctx.status(204)); 265 | }), 266 | rest.get( 267 | `${MOCK_ENDPOINT}/v2/tunes/:id/content/:type`, 268 | async (req, res, ctx) => { 269 | const tune = tunesStore.find((tune: any) => tune.id === req.params.id); 270 | if (!tune) { 271 | return res(ctx.status(404)); 272 | } 273 | const type = req.params.type as string; 274 | if (!['vectors', 'logs'].includes(type)) { 275 | return res(ctx.status(404)); 276 | } 277 | return res(ctx.status(200), ctx.body(tune.assets[type])); 278 | }, 279 | ), 280 | 281 | // History 282 | rest.get(`${MOCK_ENDPOINT}/v2/requests`, (req, res, ctx) => { 283 | const offset = parseInt(req.url.searchParams.get('offset') ?? '0'); 284 | const limit = parseInt(req.url.searchParams.get('limit') ?? '1'); 285 | 286 | return res( 287 | ctx.status(200), 288 | ctx.json({ 289 | results: historyStore.slice(offset, limit), 290 | total_count: historyStore.length, 291 | }), 292 | ); 293 | }), 294 | 295 | // Chat 296 | rest.post(`${MOCK_ENDPOINT}/v2/text/chat`, async (req, res, ctx) => { 297 | const body = await req.json(); 298 | const conversation_id = body.conversation_id ?? 
randomUUID(); 299 | if (!chatStore.has(conversation_id)) { 300 | chatStore.set(conversation_id, body.messages); 301 | } else { 302 | chatStore.get(conversation_id)?.push(...body.messages); 303 | } 304 | const conversation = chatStore.get(conversation_id); 305 | return res( 306 | ctx.status(200), 307 | ctx.json({ 308 | id: randomUUID(), 309 | model_id: body.model_id, 310 | created_at: new Date('2022-12-19T22:53:22.000Z'), 311 | conversation_id, 312 | results: conversation 313 | ?.slice(-1) 314 | .map(({ role, content }) => ({ role, generated_text: content })), 315 | }), 316 | ); 317 | }), 318 | 319 | // ERROR 320 | rest.get(`${MOCK_ENDPOINT}/error`, async (req, res, ctx) => 321 | res( 322 | ctx.status(500), 323 | ctx.json({ 324 | error: 'Any error', 325 | message: 'Any message', 326 | status_code: 500, 327 | }), 328 | ), 329 | ), 330 | ]; 331 | -------------------------------------------------------------------------------- /src/schema.ts: -------------------------------------------------------------------------------- 1 | import type { ApiClientOptions, ApiClientResponse } from './api/client.js'; 2 | import type { Empty, OmitVersion, Replace } from './utils/types.js'; 3 | 4 | type InputQueryWrapper<T> = OmitVersion<NonNullable<T>>; // For some reason, `requestBody` is optional in the generated schema 5 | type InputBodyWrapper<T> = NonNullable<T>; // For some reason, `requestBody` is optional in the generated schema 6 | type OutputWrapper<T> = NonNullable<T>; // clientErrorWrapper ensures the output is defined 7 | 8 | // TextGenerationService 9 | 10 | export type TextGenerationCreateInput = InputBodyWrapper< 11 | ApiClientOptions<'POST', '/v2/text/generation'>['body'] 12 | >; 13 | export type TextGenerationCreateOutput = OutputWrapper< 14 | ApiClientResponse<'POST', '/v2/text/generation'>['data'] 15 | >; 16 | 17 | export type TextGenerationCreateStreamInput = InputBodyWrapper< 18 | ApiClientOptions<'POST', '/v2/text/generation_stream'>['body'] 19 | >; 20 | export type TextGenerationCreateStreamOutput = OutputWrapper< 21 | ApiClientResponse<'POST', '/v2/text/generation_stream'>['data'] 22 | >; 23 | 24 | // TextTokenizationService 25 | 26 | export type TextTokenizationCreateInput = InputBodyWrapper< 27 | ApiClientOptions<'POST', '/v2/text/tokenization'>['body'] 28 | >; 29 | export type TextTokenizationCreateOutput = OutputWrapper< 30 | ApiClientResponse<'POST', '/v2/text/tokenization'>['data'] 31 | >; 32 | 33 | // TextChatService 34 | 35 | export type TextChatCreateInput = InputBodyWrapper< 36 | ApiClientOptions<'POST', '/v2/text/chat'>['body'] 37 | >; 38 | export type TextChatCreateOutput = OutputWrapper< 39 | ApiClientResponse<'POST', '/v2/text/chat'>['data'] 40 | >; 41 | 42 | export type TextChatCreateStreamInput = InputBodyWrapper< 43 | ApiClientOptions<'POST', '/v2/text/chat_stream'>['body'] 44 | >; 45 | export type TextChatCreateStreamOutput = OutputWrapper< 46 | ApiClientResponse<'POST', '/v2/text/chat_stream'>['data'] 47 | >; 48 | 49 | // TextEmbeddingService 50 | 51 | export type TextEmbeddingCreateInput = InputBodyWrapper< 52 | ApiClientOptions<'POST', '/v2/text/embeddings'>['body'] 53 | >; 54 | export type TextEmbeddingCreateOutput = OutputWrapper< 55 | ApiClientResponse<'POST', '/v2/text/embeddings'>['data'] 56 | >; 57 | 58 | // TextSentenceSimilarityService 59 | 60 | export type TextSentenceSimilarityCreateInput = InputBodyWrapper< 61 | ApiClientOptions<'POST', '/v2/beta/text/sentence-similarity'>['body'] 62 | >; 63 | export type TextSentenceSimilarityCreateOutput = OutputWrapper< 64 | 
ApiClientResponse<'POST', '/v2/beta/text/sentence-similarity'>['data'] 65 | >; 66 | 67 | // ModelService 68 | 69 | export type ModelServiceListInput = InputQueryWrapper< 70 | ApiClientOptions<'GET', '/v2/models'>['params']['query'] 71 | >; 72 | export type ModelServiceListOutput = OutputWrapper< 73 | ApiClientResponse<'GET', '/v2/models'>['data'] 74 | >; 75 | 76 | export type ModelServiceRetrieveInput = ApiClientOptions< 77 | 'GET', 78 | '/v2/models/{id}' 79 | >['params']['path']; 80 | export type ModelServiceRetrieveOutput = OutputWrapper< 81 | ApiClientResponse<'GET', '/v2/models/{id}'>['data'] 82 | >; 83 | 84 | // RequestService 85 | 86 | export type RequestServiceListInput = InputQueryWrapper< 87 | ApiClientOptions<'GET', '/v2/requests'>['params']['query'] 88 | >; 89 | export type RequestServiceListOutput = OutputWrapper< 90 | ApiClientResponse<'GET', '/v2/requests'>['data'] 91 | >; 92 | 93 | export type RequestServiceDeleteInput = ApiClientOptions< 94 | 'DELETE', 95 | '/v2/requests/{id}' 96 | >['params']['path']; 97 | export type RequestServiceDeleteOutput = OutputWrapper< 98 | ApiClientResponse<'DELETE', '/v2/requests/{id}'>['data'] 99 | >; 100 | 101 | export type RequestServiceChatInput = ApiClientOptions< 102 | 'GET', 103 | '/v2/requests/chat/{conversation_id}' 104 | >['params']['path']; 105 | export type RequestServiceChatOutput = OutputWrapper< 106 | ApiClientResponse<'GET', '/v2/requests/chat/{conversation_id}'>['data'] 107 | >; 108 | 109 | // PromptService 110 | 111 | export type PromptServiceListInput = InputQueryWrapper< 112 | ApiClientOptions<'GET', '/v2/prompts'>['params']['query'] 113 | >; 114 | export type PromptServiceListOutput = OutputWrapper< 115 | ApiClientResponse<'GET', '/v2/prompts'>['data'] 116 | >; 117 | 118 | export type PromptServiceRetrieveInput = ApiClientOptions< 119 | 'GET', 120 | '/v2/prompts/{id}' 121 | >['params']['path']; 122 | export type PromptServiceRetrieveOutput = OutputWrapper< 123 | ApiClientResponse<'GET', '/v2/prompts/{id}'>['data'] 124 | >; 125 | 126 | export type PromptServiceCreateInput = InputBodyWrapper< 127 | ApiClientOptions<'POST', '/v2/prompts'>['body'] 128 | >; 129 | export type PromptServiceCreateOutput = OutputWrapper< 130 | ApiClientResponse<'POST', '/v2/prompts'>['data'] 131 | >; 132 | 133 | export type PromptServiceDeleteInput = ApiClientOptions< 134 | 'DELETE', 135 | '/v2/prompts/{id}' 136 | >['params']['path']; 137 | export type PromptServiceDeleteOutput = OutputWrapper< 138 | ApiClientResponse<'DELETE', '/v2/prompts/{id}'>['data'] 139 | >; 140 | 141 | // TuneService 142 | 143 | export type TuneServiceListInput = InputQueryWrapper< 144 | ApiClientOptions<'GET', '/v2/tunes'>['params']['query'] 145 | >; 146 | export type TuneServiceListOutput = OutputWrapper< 147 | ApiClientResponse<'GET', '/v2/tunes'>['data'] 148 | >; 149 | 150 | export type TuneServiceReadInput = ApiClientOptions< 151 | 'GET', 152 | '/v2/tunes/{id}/content/{type}' 153 | >['params']['path']; 154 | export type TuneServiceReadOutput = Blob; // TODO Replace with proper derivation 155 | 156 | export type TuneServiceRetrieveInput = ApiClientOptions< 157 | 'GET', 158 | '/v2/tunes/{id}' 159 | >['params']['path']; 160 | export type TuneServiceRetrieveOutput = OutputWrapper< 161 | ApiClientResponse<'GET', '/v2/tunes/{id}'>['data'] 162 | >; 163 | 164 | export type TuneServiceCreateInput = InputBodyWrapper< 165 | ApiClientOptions<'POST', '/v2/tunes'>['body'] 166 | >; 167 | export type TuneServiceCreateOutput = OutputWrapper< 168 | ApiClientResponse<'POST', 
'/v2/tunes'>['data'] 169 | >; 170 | 171 | export type TuneServiceDeleteInput = ApiClientOptions< 172 | 'DELETE', 173 | '/v2/tunes/{id}' 174 | >['params']['path']; 175 | export type TuneServiceDeleteOutput = OutputWrapper< 176 | ApiClientResponse<'DELETE', '/v2/tunes/{id}'>['data'] 177 | >; 178 | 179 | export type TuneServiceTypesInput = Empty; 180 | export type TuneServiceTypesOutput = OutputWrapper< 181 | ApiClientResponse<'GET', '/v2/tuning_types'>['data'] 182 | >; 183 | 184 | // UserService 185 | 186 | export type UserServiceCreateInput = InputBodyWrapper< 187 | ApiClientOptions<'POST', '/v2/user'>['body'] 188 | >; 189 | export type UserServiceCreateOutput = OutputWrapper< 190 | ApiClientResponse<'POST', '/v2/user'>['data'] 191 | >; 192 | 193 | export type UserServiceRetrieveInput = Empty; 194 | export type UserServiceRetrieveOutput = OutputWrapper< 195 | ApiClientResponse<'GET', '/v2/user'>['data'] 196 | >; 197 | 198 | export type UserServiceUpdateInput = InputBodyWrapper< 199 | ApiClientOptions<'PATCH', '/v2/user'>['body'] 200 | >; 201 | export type UserServiceUpdateOutput = OutputWrapper< 202 | ApiClientResponse<'PATCH', '/v2/user'>['data'] 203 | >; 204 | 205 | export type UserServiceDeleteInput = Empty; 206 | export type UserServiceDeleteOutput = OutputWrapper< 207 | ApiClientResponse<'DELETE', '/v2/user'>['data'] 208 | >; 209 | 210 | // FileService 211 | 212 | export type FileServiceCreateInput = Replace< 213 | InputBodyWrapper<ApiClientOptions<'POST', '/v2/files'>['body']>, 214 | { file: { content: Blob; name: string } } 215 | >; 216 | export type FileServiceCreateOutput = OutputWrapper< 217 | ApiClientResponse<'POST', '/v2/files'>['data'] 218 | >; 219 | 220 | export type FileServiceRetrieveInput = ApiClientOptions< 221 | 'GET', 222 | '/v2/files/{id}' 223 | >['params']['path']; 224 | export type FileServiceRetrieveOutput = OutputWrapper< 225 | ApiClientResponse<'GET', '/v2/files/{id}'>['data'] 226 | >; 227 | 228 | export type FileServiceReadInput = ApiClientOptions< 229 | 'GET', 230 | '/v2/files/{id}/content' 231 | >['params']['path']; 232 | export type FileServiceReadOutput = Blob; // TODO Replace with proper derivation 233 | 234 | export type FileServiceDeleteInput = ApiClientOptions< 235 | 'DELETE', 236 | '/v2/files/{id}' 237 | >['params']['path']; 238 | export type FileServiceDeleteOutput = OutputWrapper< 239 | ApiClientResponse<'DELETE', '/v2/files/{id}'>['data'] 240 | >; 241 | 242 | export type FileServiceListInput = InputQueryWrapper< 243 | ApiClientOptions<'GET', '/v2/files'>['params']['query'] 244 | >; 245 | export type FileServiceListOutput = OutputWrapper< 246 | ApiClientResponse<'GET', '/v2/files'>['data'] 247 | >; 248 | 249 | // SystemPromptService 250 | 251 | export type SystemPromptServiceCreateInput = InputBodyWrapper< 252 | ApiClientOptions<'POST', '/v2/system_prompts'>['body'] 253 | >; 254 | export type SystemPromptServiceCreateOutput = OutputWrapper< 255 | ApiClientResponse<'POST', '/v2/system_prompts'>['data'] 256 | >; 257 | 258 | export type SystemPromptServiceRetrieveInput = ApiClientOptions< 259 | 'GET', 260 | '/v2/system_prompts/{id}' 261 | >['params']['path']; 262 | export type SystemPromptServiceRetrieveOutput = OutputWrapper< 263 | ApiClientResponse<'GET', '/v2/system_prompts/{id}'>['data'] 264 | >; 265 | 266 | export type SystemPromptServiceUpdateInput = ApiClientOptions< 267 | 'PUT', 268 | '/v2/system_prompts/{id}' 269 | >['params']['path'] & 270 | InputBodyWrapper<ApiClientOptions<'PUT', '/v2/system_prompts/{id}'>['body']>; 271 | export type SystemPromptServiceUpdateOutput = OutputWrapper< 272 | ApiClientResponse<'PUT', 
'/v2/system_prompts/{id}'>['data'] 273 | >; 274 | 275 | export type SystemPromptServiceDeleteInput = ApiClientOptions< 276 | 'DELETE', 277 | '/v2/system_prompts/{id}' 278 | >['params']['path']; 279 | export type SystemPromptServiceDeleteOutput = OutputWrapper< 280 | ApiClientResponse<'DELETE', '/v2/system_prompts/{id}'>['data'] 281 | >; 282 | 283 | export type SystemPromptServiceListInput = InputQueryWrapper< 284 | ApiClientOptions<'GET', '/v2/system_prompts'>['params']['query'] 285 | >; 286 | export type SystemPromptServiceListOutput = OutputWrapper< 287 | ApiClientResponse<'GET', '/v2/system_prompts'>['data'] 288 | >; 289 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # IBM Generative AI Node.js SDK (Tech Preview) 2 | 3 | > [!TIP] 4 | > 5 | > Are you interested in AI Agents? Check out [bee-agent-framework](https://github.com/i-am-bee/bee-agent-framework) from IBM Research. 6 | 7 | This is not the [watsonx.ai](https://www.ibm.com/products/watsonx-ai) Node.js SDK. This is the Node.js SDK for the Tech Preview program for IBM Foundation Models Studio. This SDK brings IBM Generative AI (GenAI) into Node.js programs and provides useful operations and types. 8 | 9 | You can start a trial version or request a demo via https://www.ibm.com/products/watsonx-ai. 10 | 11 | This library provides convenient access to the Generative AI API from Node.js applications. For a full description of the API, please visit the [Tech Preview API Documentation](https://bam.res.ibm.com/docs/api-reference). 12 | 13 | The SDK supports both TypeScript and JavaScript as well as ESM and CommonJS. 14 | 15 | > Looking for the [watsonx.ai](https://www.ibm.com/products/watsonx-ai) Python SDK? Check out the [documentation](https://ibm.github.io/watsonx-ai-python-sdk/foundation_models.html). 16 | > Looking for the Python version? Check out [IBM Generative AI Python SDK](https://github.com/IBM/ibm-generative-ai). 17 | > Looking for a command-line interface? Check out [IBM Generative AI CLI](https://github.com/IBM/ibm-generative-ai-cli). 18 | 19 | ![-----------------------------------------------------](./docs/img/rainbow.png) 20 | 21 | ## Table of contents 22 | 23 | - [Key Features](#key-features) 24 | - [SDK](#sdk) 25 | - [Installation](#installation) 26 | - [Usage](#usage) 27 | - [API Reference](#api-reference) 28 | - [Langchain](#langchain) 29 | - [Migration](#migration-from-v1) 30 | 31 | ![-----------------------------------------------------](./docs/img/rainbow.png) 32 | 33 | ## Key features 34 | 35 | - ⚡️ Performant - processes 1k short inputs in under a minute 36 | - ☀️ Fault-tolerant - retry strategies and overflood protection 37 | - 🚦 Handles concurrency limiting - even if you have multiple parallel jobs running 38 | - 📌 Aligned with the REST API - clear structure that mirrors service endpoints and data 39 | - Integrations 40 | - ⛓️ LangChain - build applications with LLMs through composability 41 | 42 | ![-----------------------------------------------------](./docs/img/rainbow.png) 43 | 44 | ## SDK 45 | 46 | This is a hybrid package that supports both ESM and CommonJS, so you can use `import` or `require`. The package is Node.js only; browser usage is not supported because it would expose your API key.
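Both module systems consume the same package; as a minimal sketch of the two import styles (assuming only the `Client` export shown in the usage section below):

```typescript
// ESM / TypeScript
import { Client } from '@ibm-generative-ai/node-sdk';

// CommonJS equivalent of the same import:
// const { Client } = require('@ibm-generative-ai/node-sdk');
```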
47 | 48 | ### Installation 49 | 50 | Install the package using npm: 51 | 52 | ```shell 53 | npm install @ibm-generative-ai/node-sdk 54 | ``` 55 | 56 | Or using yarn: 57 | 58 | ```bash 59 | yarn add @ibm-generative-ai/node-sdk 60 | ``` 61 | 62 | ### Usage 63 | 64 | To use the SDK, you first need to create a client. The API key can be passed to the client as a parameter or via the `GENAI_API_KEY` environment variable. 65 | 66 | ```typescript 67 | import { Client } from '@ibm-generative-ai/node-sdk'; 68 | 69 | const client = new Client({ apiKey: 'pak-.....' }); 70 | ``` 71 | 72 | The client contains various services backed by the REST API endpoints. Select a service you'd like to use and call CRUDL-like methods on it. 73 | 74 | ```typescript 75 | const output = await client.text.generation.create({ 76 | model_id: 'google/flan-ul2', 77 | input: 'What is the capital of the United Kingdom?', 78 | }); 79 | ``` 80 | 81 | #### Streams 82 | 83 | Some services support output streaming; you can easily recognize streaming methods by their `_stream` suffix. 84 | 85 | ```typescript 86 | const stream = await client.text.generation.create_stream({ 87 | model_id: 'google/flan-ul2', 88 | input: 'What is the capital of the United Kingdom?', 89 | }); 90 | for await (const output of stream) { 91 | console.log(output); 92 | } 93 | ``` 94 | 95 | #### Cancellation 96 | 97 | All service methods support cancellation via [AbortSignal](https://developer.mozilla.org/en-US/docs/Web/API/AbortSignal). Use the options argument to pass a signal into the method. 98 | 99 | ```typescript 100 | const output = await client.text.generation.create( 101 | { 102 | model_id: 'google/flan-ul2', 103 | input: 'What is the capital of the United Kingdom?', 104 | }, 105 | { signal: AbortSignal.timeout(5000) }, 106 | ); 107 | ``` 108 | 109 | Refer to [examples](./examples/) for further guidance. 110 | 111 | ### API Reference 112 | 113 | The SDK structure closely follows [REST API](https://bam.res.ibm.com/docs/api-reference) endpoints. To use the desired functionality, first locate a [service](./src/services/) and then call the appropriate method on it. 114 | 115 | ```typescript 116 | // Signature template 117 | const output = await client.service[.subservice].method(input, options); 118 | 119 | // POST /v2/text/generation 120 | const output = await client.text.generation.create(input, options) 121 | ``` 122 | 123 | The input and output of each method are forwarded to the corresponding endpoint. The SDK exports [typings](./src/schema.ts) for each input and output. 124 | 125 | A standalone API reference is NOT available at the moment; please refer to the [REST API Reference](https://bam.res.ibm.com/docs/api-reference) to find the functionality you're looking for and the input/output semantics. 126 | 127 | ## LangChain 128 | 129 | [LangChain](https://js.langchain.com/docs/getting-started/guide-llm) is a framework for developing applications powered by language models. 130 | The following example showcases how you can integrate GenAI into your project. 
131 | 132 | ```typescript 133 | import { Client } from '@ibm-generative-ai/node-sdk'; 134 | import { GenAIModel } from '@ibm-generative-ai/node-sdk/langchain'; 135 | 136 | const model = new GenAIModel({ 137 | modelId: 'google/flan-ul2', 138 | parameters: {}, 139 | client: new Client({ 140 | apiKey: 'pak-.....', 141 | }), 142 | }); 143 | ``` 144 | 145 | ### Basic usage 146 | 147 | ```typescript 148 | const response = await model.invoke( 149 | 'What would be a good company name for a company that makes colorful socks?', 150 | ); 151 | 152 | console.log(response); // Fantasy Sockery 153 | ``` 154 | 155 | ### LLM Chain + Prompt Template 156 | 157 | ```typescript 158 | import { PromptTemplate } from '@langchain/core/prompts'; 159 | import { LLMChain } from 'langchain/chains'; 160 | 161 | const prompt = new PromptTemplate({ 162 | template: 'What is a good name for a company that makes {product}?', 163 | inputVariables: ['product'], 164 | }); 165 | // Another way: 166 | // const prompt = PromptTemplate.fromTemplate( 167 | // "What is a good name for a company that makes {product}?" 168 | // ); 169 | 170 | const chain = new LLMChain({ llm: model, prompt: prompt }); 171 | const { text } = await chain.call({ product: 'clothes' }); 172 | 173 | console.log(text); // ArcticAegis 174 | ``` 175 | 176 | ### Streaming 177 | 178 | ```typescript 179 | import { Client } from '@ibm-generative-ai/node-sdk'; 180 | import { GenAIModel } from '@ibm-generative-ai/node-sdk/langchain'; 181 | 182 | const model = new GenAIModel({ 183 | modelId: 'google/flan-ul2', 184 | stream: true, 185 | parameters: {}, 186 | client: new Client({ 187 | apiKey: 'pak-.....', 188 | }), 189 | }); 190 | 191 | await model.invoke('Tell me a joke.', { 192 | callbacks: [ 193 | { 194 | handleLLMNewToken(token) { 195 | console.log(token); 196 | }, 197 | }, 198 | ], 199 | }); 200 | ``` 201 | 202 | ### Chat support 203 | 204 | ```typescript 205 | import { Client } from '@ibm-generative-ai/node-sdk'; 206 | import { GenAIChatModel } from '@ibm-generative-ai/node-sdk/langchain'; 207 | import { SystemMessage, HumanMessage } from '@langchain/core/messages'; 208 | 209 | const chatModel = new GenAIChatModel({ 210 | model_id: 'meta-llama/llama-3-70b-instruct', 211 | client: new Client({ 212 | endpoint: process.env.ENDPOINT, 213 | apiKey: process.env.API_KEY, 214 | }), 215 | parameters: { 216 | decoding_method: 'greedy', 217 | min_new_tokens: 10, 218 | max_new_tokens: 25, 219 | repetition_penalty: 1.5, 220 | }, 221 | }); 222 | 223 | const response = await chatModel.invoke([ 224 | new SystemMessage( 225 | 'You are a helpful assistant that translates English to Spanish.', 226 | ), 227 | new HumanMessage('I love programming.'), 228 | ]); 229 | 230 | console.info(response.content); // "Me encanta la programación." 231 | ``` 232 | 233 | ### Prompt Templates (GenAI x LangChain) 234 | 235 | To use a GenAI prompt template in LangChain, the template has to be converted between the appropriate template syntaxes. 236 | This can be done via helper classes provided within our SDK. 237 | 238 | ```typescript 239 | import { GenAIPromptTemplate } from '@ibm-generative-ai/node-sdk/langchain'; 240 | import { PromptTemplate } from '@langchain/core/prompts'; 241 | 242 | // Converting the LangChain Prompt Template (f-string) to GenAI Prompt Template 243 | const promptTemplate = GenAIPromptTemplate.fromLangChain( 244 | PromptTemplate.fromTemplate(`Tell me a {adjective} joke about {content}.`), 245 | ); 246 | console.log(promptTemplate); // "Tell me a {{adjective}} joke about {{content}}." 
247 | 248 | // Converting the GenAI Prompt Template to LangChain Prompt Template 249 | const langChainPromptTemplate = GenAIPromptTemplate.toLangChain( 250 | `Tell me a {{adjective}} joke about {{content}}.`, 251 | ); 252 | 253 | console.log(langChainPromptTemplate); // "Tell me a {adjective} joke about {content}." 254 | ``` 255 | 256 | ![-----------------------------------------------------](./docs/img/rainbow.png) 257 | 258 | ## Migration from v1 259 | 260 | The interface overhaul in v2 was thorough; almost everything has been affected. This means you have to revisit every usage of the Node.js SDK and make the necessary adjustments to the interface. On the bright side, you can achieve this mostly by following a few simple steps. 261 | 262 | Let's say you were calling the following method to perform text generation: 263 | 264 | ```typescript 265 | const oldOutputs = await client.generate(oldInputs, { timeout }); 266 | ``` 267 | 268 | This interface changed as follows: 269 | 270 | 1. the method is nested inside a service 271 | 2. the input and output structures have changed a bit 272 | 3. timeout has been replaced by signal 273 | 4. only a single input is accepted 274 | 275 | The new equivalent usage is then: 276 | 277 | ```typescript 278 | const signal = AbortSignal.timeout(timeout); 279 | const oldOutputs = await Promise.all( 280 | oldInputs.map((input) => client.text.generation.create(input, { signal })), 281 | ); 282 | ``` 283 | 284 | Additional migration tips: 285 | 286 | - output streaming now has a separate method (e.g. `create_stream`) 287 | - binary I/O is done using [Blobs](https://nodejs.org/api/buffer.html#class-blob) 288 | - callback interface is no longer supported, use [callbackify](https://nodejs.org/api/util.html#utilcallbackifyoriginal) wrapper if you have to (see the sketch below) 289 | 
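As a minimal sketch of that last tip (the wrapper and variable names here are illustrative, not part of the SDK):

```typescript
import { callbackify } from 'node:util';

import { Client } from '@ibm-generative-ai/node-sdk';
import type { TextGenerationCreateInput } from '@ibm-generative-ai/node-sdk';

const client = new Client({ apiKey: 'pak-.....' });

// Wrap the promise-based v2 method back into a Node-style callback API
const createGeneration = callbackify((input: TextGenerationCreateInput) =>
  client.text.generation.create(input),
);

createGeneration(
  { model_id: 'google/flan-ul2', input: 'Hello, World' },
  (err, output) => {
    if (err) throw err;
    console.log(output);
  },
);
```

![-----------------------------------------------------](./docs/img/rainbow.png)
--------------------------------------------------------------------------------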