├── .env.example
├── .prettierrc
├── docs
├── .nojekyll
├── assets
│ ├── navigation.js
│ └── highlight.css
├── types
│ ├── types.FileContent.html
│ ├── types.RetrieveModelResponse.html
│ └── types.LogLevel.html
├── modules
│ ├── openai_client.html
│ └── errors.html
├── hierarchy.html
└── interfaces
│ ├── types.ErrorResponse.html
│ ├── types.EditChoice.html
│ ├── types.ImageData.html
│ ├── types.UploadFileOptions.html
│ ├── types.ImageResponse.html
│ ├── types.ListModelsResponse.html
│ ├── types.ListFilesResponse.html
│ ├── types.ListFineTuneEventsResponse.html
│ ├── types.CreateModerationOptions.html
│ ├── types.ChatMessage.html
│ ├── types.ContextEntry.html
│ ├── types.EmbeddingData.html
│ ├── types.ModerationResponse.html
│ ├── types.CreateEmbeddingOptions.html
│ └── types.ModerationResult.html
├── tsconfig.example.json
├── jest.config.js
├── tsconfig.json
├── .eslintrc.json
├── .github
└── workflows
│ └── deploy-docs.yml
├── LICENSE
├── src
├── errors.ts
└── types.ts
├── package.json
├── .gitignore
├── examples
└── example.ts
├── README.md
└── tests
└── openai-client.test.ts
/.env.example:
--------------------------------------------------------------------------------
1 | # Define the variables below in a .env file to run the examples and tests
2 | OPENAI_API_KEY=your_openai_api_key_here
--------------------------------------------------------------------------------
/.prettierrc:
--------------------------------------------------------------------------------
1 | {
2 | "semi": true,
3 | "singleQuote": true,
4 | "trailingComma": "es5",
5 | "printWidth": 100,
6 | "tabWidth": 2
7 | }
--------------------------------------------------------------------------------
/docs/.nojekyll:
--------------------------------------------------------------------------------
1 | TypeDoc added this file to prevent GitHub Pages from using Jekyll. You can turn off this behavior by setting the `githubPages` option to false.
--------------------------------------------------------------------------------
/tsconfig.example.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "outDir": "./dist-example",
4 | "module": "commonjs",
5 | "target": "es6",
6 | "esModuleInterop": true,
7 | "skipLibCheck": true
8 | },
9 | "include": [
10 | "examples/example.ts"
11 | ]
12 | }
--------------------------------------------------------------------------------
/jest.config.js:
--------------------------------------------------------------------------------
1 | // jest.config.js
2 | module.exports = {
3 | preset: 'ts-jest',
4 | testEnvironment: 'node',
5 | moduleFileExtensions: ['ts', 'tsx', 'js', 'jsx', 'json', 'node'],
6 | testMatch: ['**/tests/**/*.test.ts'],
7 | transform: {
8 | '^.+\\.tsx?$': 'ts-jest',
9 | },
10 | };
--------------------------------------------------------------------------------
/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "target": "ES6",
4 | "module": "commonjs",
5 | "outDir": "./dist",
6 | "strict": true,
7 | "esModuleInterop": true,
8 | "forceConsistentCasingInFileNames": true,
9 | "skipLibCheck": true,
10 | "resolveJsonModule": true,
11 | "removeComments": true,
12 | "noUnusedLocals": true,
13 | "noUnusedParameters": true,
14 | "baseUrl": "./",
15 | "paths": {
16 | "*": ["src/*"]
17 | }
18 | },
19 | "include": ["src/**/*", "tests/**/*"]
20 | }
--------------------------------------------------------------------------------
/.eslintrc.json:
--------------------------------------------------------------------------------
1 | {
2 | "env": {
3 | "browser": false,
4 | "es2021": true,
5 | "node": true,
6 | "jest": true
7 | },
8 | "extends": ["eslint:recommended", "plugin:@typescript-eslint/recommended", "prettier"],
9 | "parser": "@typescript-eslint/parser",
10 | "parserOptions": {
11 | "ecmaVersion": "latest",
12 | "sourceType": "module"
13 | },
14 | "plugins": ["@typescript-eslint", "prettier", "jest"],
15 | "rules": {
16 | "prettier/prettier": "error",
17 | "@typescript-eslint/no-explicit-any": "off",
18 | "@typescript-eslint/explicit-module-boundary-types": "off"
19 | }
20 | }
--------------------------------------------------------------------------------
/.github/workflows/deploy-docs.yml:
--------------------------------------------------------------------------------
1 | name: Deploy Docs to GitHub Pages
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 |
8 | jobs:
9 | build:
10 | runs-on: ubuntu-latest
11 |
12 | steps:
13 | - name: Checkout code
14 | uses: actions/checkout@v2
15 |
16 | - name: Set up Node.js
17 | uses: actions/setup-node@v2
18 | with:
19 | node-version: '20.10.0'
20 |
21 | - name: Install dependencies
22 | run: npm install
23 |
24 | - name: Generate docs
25 | run: npm run docs
26 |
27 | - name: Deploy to GitHub Pages
28 | uses: peaceiris/actions-gh-pages@v3
29 | with:
30 | github_token: ${{ secrets.GITHUB_TOKEN }}
31 | publish_dir: ./docs
--------------------------------------------------------------------------------
/docs/assets/navigation.js:
--------------------------------------------------------------------------------
1 | window.navigationData = "data:application/octet-stream;base64,H4sIAAAAAAAAE52XXWvbMBSG/4uus48Guo/clSyFQkpGtvWmlKJap7E2WTL2SVgZ/e/DdhJLtjk66l2wn/O88pGsWPf/BMJfFAsBVeWqWsxEKTEXC1E4tTdQf+iuv8+xMGIm/mirxGI+E1mujarAisX9WXH1/WbV0L0kM7Kue8kJCG0X8y+vs16yxxws6kyidjbiG7OUelOCvYoM0WMo1VYirHWhkbaFGCW8k0YrxiMPuLHywZO6EqzU7zKjweJ4arvbj91t1gwreJZ7g+PRhaojRg8OX0qYWG/tZd5y2yvtNmXTC8+jLUL1LLOzyscG3stP3niWucSlK0oDDbrMnc6A0E7hfP0W6tLZmh9wKuBH/MAKZJEcFJbF4m6hruUuZj9SpCyh8yldT+p4Wrff0Om3dNnZ5tfKYvVCqnuM1FUgEcIpj79DRBUjLD0oPWSlNHL1HssQF0+glLY7tn1QEI+41hZ+7i1wEwZ8POCmkLukDg0LmBFJ+jT1nay0TFlDk1XxsFunoEoKGlVQIU1PoxtcD8VUjD3Hx0jdadl+kygpn8+xhJxBDllS3Hz4cKQ+RwmvtYHN02/IkLD1EK3qXs3VIfjimrB5HEfIeN4hSmnbtyMy02cmKmIMLuAo4VrX2PS6ZkhHbFzstZ2fMFUUi2q2BcONCGFS7XY71p9RCFLKNpwwtfdjgm4HZDzuGGarg5MHLR6dPkJtd8pbtqeVeCsnaEr+qzROqna/iKpHbGyfar/z/K2l9Xh71BEYeD5+/XxxOQ/X0RoO/rz7otPdmGULWGk4tP+BZjz5vnISnfY/vD78B1EWHhyYEAAA"
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 Bruno Garcia
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/src/errors.ts:
--------------------------------------------------------------------------------
1 | // src/errors.ts
2 |
3 | export class OpenAIError extends Error {
4 | statusCode?: number;
5 | data?: any;
6 |
7 | constructor(message: string, statusCode?: number, data?: any) {
8 | super(message);
9 | this.name = 'OpenAIError';
10 | this.statusCode = statusCode;
11 | this.data = data;
12 | Object.setPrototypeOf(this, new.target.prototype);
13 | }
14 | }
15 |
16 | export class AuthenticationError extends OpenAIError {
17 | constructor(message: string, statusCode?: number, data?: any) {
18 | super(message, statusCode, data);
19 | this.name = 'AuthenticationError';
20 | }
21 | }
22 |
23 | export class ValidationError extends OpenAIError {
24 | constructor(message: string, statusCode?: number, data?: any) {
25 | super(message, statusCode, data);
26 | this.name = 'ValidationError';
27 | }
28 | }
29 |
30 | export class RateLimitError extends OpenAIError {
31 | constructor(message: string, statusCode?: number, data?: any) {
32 | super(message, statusCode, data);
33 | this.name = 'RateLimitError';
34 | }
35 | }
36 |
37 | export class APIError extends OpenAIError {
38 | constructor(message: string, statusCode?: number, data?: any) {
39 | super(message, statusCode, data);
40 | this.name = 'APIError';
41 | }
42 | }
43 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "openai-enhanced-sdk",
3 | "version": "1.0.2",
4 | "description": "An advanced TypeScript SDK for OpenAI API with built-in context management, proxy support, streaming, and enhanced error handling. Includes logging, retry mechanism, and fully typed responses for seamless AI integration.",
5 | "main": "dist/openai-client.js",
6 | "scripts": {
7 | "build": "tsc",
8 | "build:example": "tsc -p tsconfig.example.json",
9 | "start:example": "npm run build:example && node dist-example/example.js",
10 | "test": "jest",
11 | "lint": "eslint 'src/**/*.{ts,tsx}'",
12 | "lint:fix": "eslint 'src/**/*.{ts,tsx}' --fix",
13 | "prettier": "prettier --write 'src/**/*.{ts,tsx,json,md}'",
14 | "docs": "typedoc --out docs src/ --name 'OpenAI Enhanced SDK' --entryPointStrategy expand"
15 | },
16 | "dependencies": {
17 | "axios": "^1.4.0",
18 | "axios-retry": "^3.3.1",
19 | "dotenv": "^16.3.1",
20 | "form-data": "^4.0.1",
21 | "https-proxy-agent": "^7.0.5",
22 | "winston": "^3.8.2"
23 | },
24 | "devDependencies": {
25 | "@types/axios": "^0.14.0",
26 | "@types/jest": "^29.5.13",
27 | "@types/node": "^20.4.2",
28 | "@types/winston": "^2.4.4",
29 | "@typescript-eslint/eslint-plugin": "^7.18.0",
30 | "eslint": "^8.44.0",
31 | "eslint-config-prettier": "^8.8.0",
32 | "eslint-plugin-import": "^2.27.5",
33 | "eslint-plugin-jest": "^27.2.3",
34 | "eslint-plugin-prettier": "^5.2.1",
35 | "jest": "^29.5.0",
36 | "nock": "^13.3.0",
37 | "prettier": "^3.3.3",
38 | "ts-jest": "^29.2.5",
39 | "typedoc": "^0.26.8",
40 | "typescript": "^5.1.6"
41 | },
42 | "files": [
43 | "dist",
44 | "README.md",
45 | "LICENSE"
46 | ],
47 | "repository": {
48 | "type": "git",
49 | "url": "git+https://github.com/bgarciaoliveira/openai-enhanced-sdk.git"
50 | },
51 | "keywords": [
52 | "AI",
53 | "API",
54 | "Axios",
55 | "Chat",
56 | "Context",
57 | "Error",
58 | "GPT-4",
59 | "GPT-4o",
60 | "OpenAI",
61 | "Proxy",
62 | "Retry",
63 | "SDK",
64 | "Streaming",
65 | "TypeScript",
66 | "client",
67 | "completions",
68 | "embeddings",
69 | "handling",
70 | "integration",
71 | "logging",
72 | "management",
73 | "mechanism",
74 | "mini",
75 | "o1",
76 | "o1-mini",
77 | "o1-preview",
78 | "support"
79 | ],
80 | "author": "bgarciaoliveira",
81 | "license": "MIT",
82 | "bugs": {
83 | "url": "https://github.com/bgarciaoliveira/openai-enhanced-sdk/issues"
84 | },
85 | "homepage": "https://github.com/bgarciaoliveira/openai-enhanced-sdk#readme"
86 | }
87 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Logs
2 | logs
3 | *.log
4 | npm-debug.log*
5 | yarn-debug.log*
6 | yarn-error.log*
7 | lerna-debug.log*
8 | .pnpm-debug.log*
9 |
10 | # Diagnostic reports (https://nodejs.org/api/report.html)
11 | report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
12 |
13 | # Runtime data
14 | pids
15 | *.pid
16 | *.seed
17 | *.pid.lock
18 |
19 | # Directory for instrumented libs generated by jscoverage/JSCover
20 | lib-cov
21 |
22 | # Coverage directory used by tools like istanbul
23 | coverage
24 | *.lcov
25 |
26 | # nyc test coverage
27 | .nyc_output
28 |
29 | # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
30 | .grunt
31 |
32 | # Bower dependency directory (https://bower.io/)
33 | bower_components
34 |
35 | # node-waf configuration
36 | .lock-wscript
37 |
38 | # Compiled binary addons (https://nodejs.org/api/addons.html)
39 | build/Release
40 |
41 | # Dependency directories
42 | node_modules/
43 | jspm_packages/
44 |
45 | # Snowpack dependency directory (https://snowpack.dev/)
46 | web_modules/
47 |
48 | # TypeScript cache
49 | *.tsbuildinfo
50 |
51 | # Optional npm cache directory
52 | .npm
53 |
54 | # Optional eslint cache
55 | .eslintcache
56 |
57 | # Optional stylelint cache
58 | .stylelintcache
59 |
60 | # Microbundle cache
61 | .rpt2_cache/
62 | .rts2_cache_cjs/
63 | .rts2_cache_es/
64 | .rts2_cache_umd/
65 |
66 | # Optional REPL history
67 | .node_repl_history
68 |
69 | # Output of 'npm pack'
70 | *.tgz
71 |
72 | # Yarn Integrity file
73 | .yarn-integrity
74 |
75 | # dotenv environment variable files
76 | .env
77 | .env.development.local
78 | .env.test.local
79 | .env.production.local
80 | .env.local
81 |
82 | # parcel-bundler cache (https://parceljs.org/)
83 | .cache
84 | .parcel-cache
85 |
86 | # Next.js build output
87 | .next
88 | out
89 |
90 | # Nuxt.js build / generate output
91 | .nuxt
92 | dist
93 |
94 | # Gatsby files
95 | .cache/
96 | # Comment in the public line in if your project uses Gatsby and not Next.js
97 | # https://nextjs.org/blog/next-9-1#public-directory-support
98 | # public
99 |
100 | # vuepress build output
101 | .vuepress/dist
102 |
103 | # vuepress v2.x temp and cache directory
104 | .temp
105 | .cache
106 |
107 | # Docusaurus cache and generated files
108 | .docusaurus
109 |
110 | # Serverless directories
111 | .serverless/
112 |
113 | # FuseBox cache
114 | .fusebox/
115 |
116 | # DynamoDB Local files
117 | .dynamodb/
118 |
119 | # TernJS port file
120 | .tern-port
121 |
122 | # Stores VSCode versions used for testing VSCode extensions
123 | .vscode-test
124 |
125 | # yarn v2
126 | .yarn/cache
127 | .yarn/unplugged
128 | .yarn/build-state.yml
129 | .yarn/install-state.gz
130 | .pnp.*
131 |
132 | # MacOS
133 | .DS_Store
134 |
135 | # Environment variables
136 | .env
--------------------------------------------------------------------------------
/docs/assets/highlight.css:
--------------------------------------------------------------------------------
1 | :root {
2 | --light-hl-0: #795E26;
3 | --dark-hl-0: #DCDCAA;
4 | --light-hl-1: #000000;
5 | --dark-hl-1: #D4D4D4;
6 | --light-hl-2: #A31515;
7 | --dark-hl-2: #CE9178;
8 | --light-hl-3: #AF00DB;
9 | --dark-hl-3: #C586C0;
10 | --light-hl-4: #001080;
11 | --dark-hl-4: #9CDCFE;
12 | --light-hl-5: #0000FF;
13 | --dark-hl-5: #569CD6;
14 | --light-hl-6: #0070C1;
15 | --dark-hl-6: #4FC1FF;
16 | --light-hl-7: #098658;
17 | --dark-hl-7: #B5CEA8;
18 | --light-hl-8: #267F99;
19 | --dark-hl-8: #4EC9B0;
20 | --light-hl-9: #008000;
21 | --dark-hl-9: #6A9955;
22 | --light-hl-10: #000000;
23 | --dark-hl-10: #C8C8C8;
24 | --light-code-background: #FFFFFF;
25 | --dark-code-background: #1E1E1E;
26 | }
27 |
28 | @media (prefers-color-scheme: light) { :root {
29 | --hl-0: var(--light-hl-0);
30 | --hl-1: var(--light-hl-1);
31 | --hl-2: var(--light-hl-2);
32 | --hl-3: var(--light-hl-3);
33 | --hl-4: var(--light-hl-4);
34 | --hl-5: var(--light-hl-5);
35 | --hl-6: var(--light-hl-6);
36 | --hl-7: var(--light-hl-7);
37 | --hl-8: var(--light-hl-8);
38 | --hl-9: var(--light-hl-9);
39 | --hl-10: var(--light-hl-10);
40 | --code-background: var(--light-code-background);
41 | } }
42 |
43 | @media (prefers-color-scheme: dark) { :root {
44 | --hl-0: var(--dark-hl-0);
45 | --hl-1: var(--dark-hl-1);
46 | --hl-2: var(--dark-hl-2);
47 | --hl-3: var(--dark-hl-3);
48 | --hl-4: var(--dark-hl-4);
49 | --hl-5: var(--dark-hl-5);
50 | --hl-6: var(--dark-hl-6);
51 | --hl-7: var(--dark-hl-7);
52 | --hl-8: var(--dark-hl-8);
53 | --hl-9: var(--dark-hl-9);
54 | --hl-10: var(--dark-hl-10);
55 | --code-background: var(--dark-code-background);
56 | } }
57 |
58 | :root[data-theme='light'] {
59 | --hl-0: var(--light-hl-0);
60 | --hl-1: var(--light-hl-1);
61 | --hl-2: var(--light-hl-2);
62 | --hl-3: var(--light-hl-3);
63 | --hl-4: var(--light-hl-4);
64 | --hl-5: var(--light-hl-5);
65 | --hl-6: var(--light-hl-6);
66 | --hl-7: var(--light-hl-7);
67 | --hl-8: var(--light-hl-8);
68 | --hl-9: var(--light-hl-9);
69 | --hl-10: var(--light-hl-10);
70 | --code-background: var(--light-code-background);
71 | }
72 |
73 | :root[data-theme='dark'] {
74 | --hl-0: var(--dark-hl-0);
75 | --hl-1: var(--dark-hl-1);
76 | --hl-2: var(--dark-hl-2);
77 | --hl-3: var(--dark-hl-3);
78 | --hl-4: var(--dark-hl-4);
79 | --hl-5: var(--dark-hl-5);
80 | --hl-6: var(--dark-hl-6);
81 | --hl-7: var(--dark-hl-7);
82 | --hl-8: var(--dark-hl-8);
83 | --hl-9: var(--dark-hl-9);
84 | --hl-10: var(--dark-hl-10);
85 | --code-background: var(--dark-code-background);
86 | }
87 |
88 | .hl-0 { color: var(--hl-0); }
89 | .hl-1 { color: var(--hl-1); }
90 | .hl-2 { color: var(--hl-2); }
91 | .hl-3 { color: var(--hl-3); }
92 | .hl-4 { color: var(--hl-4); }
93 | .hl-5 { color: var(--hl-5); }
94 | .hl-6 { color: var(--hl-6); }
95 | .hl-7 { color: var(--hl-7); }
96 | .hl-8 { color: var(--hl-8); }
97 | .hl-9 { color: var(--hl-9); }
98 | .hl-10 { color: var(--hl-10); }
99 | pre, code { background: var(--code-background); }
100 |
--------------------------------------------------------------------------------
/examples/example.ts:
--------------------------------------------------------------------------------
1 | // example.ts
2 |
3 | import dotenv from 'dotenv';
4 | import OpenAIClient from '../src/openai-client';
5 | import { OpenAIError } from '../src/errors';
6 | import { HttpsProxyAgent } from 'https-proxy-agent';
7 | import { ContextEntry, CreateChatCompletionOptions, CreateImageOptions } from '../src/types';
8 |
9 | dotenv.config();
10 |
11 | const apiKey = process.env.OPENAI_API_KEY || '';
12 | const client = new OpenAIClient(apiKey, {
13 | loggingOptions: {
14 | logLevel: 'info',
15 | logToFile: false,
16 | },
17 | });
18 |
19 | (async () => {
20 | try {
21 | // === Proxy Configuration Example ===
22 |
23 | const proxyAgent = new HttpsProxyAgent('http://localhost:8080');
24 |
25 | const proxyClient = new OpenAIClient(apiKey, {
26 | proxyConfig: proxyAgent,
27 | loggingOptions: {
28 | logLevel: 'info',
29 | },
30 | });
31 |
32 | const models = await proxyClient.listModels();
33 | console.log('\nModels using Proxy:');
34 | console.log(models);
35 |
36 | // === Context Management ===
37 |
38 | // Clear any existing context
39 | client.clearContext();
40 |
41 | // Add initial context entries
42 | client.addToContext({
43 | role: 'system',
44 | content: 'You are a helpful assistant.',
45 | });
46 |
47 | client.addToContext({
48 | role: 'user',
49 | content: 'Tell me about the solar system.',
50 | });
51 |
52 | // Add a batch of context entries
53 | const contextBatch: ContextEntry[] = [
54 | {
55 | role: 'assistant',
56 | content: 'The solar system consists of the Sun and the objects that orbit it.',
57 | },
58 | {
59 | role: 'user',
60 | content: 'Can you tell me about Mars?',
61 | },
62 | ];
63 | client.addBatchToContext(contextBatch);
64 |
65 | // Create a chat completion with context
66 | const chatOptions: CreateChatCompletionOptions = {
67 | model: 'gpt-3.5-turbo',
68 | messages: [{ role: 'user', content: 'What is the atmosphere like on Mars?' }],
69 | };
70 |
71 | const chatResponse = await client.createChatCompletion(chatOptions);
72 | console.log('\nChat Completion Response with Context:');
73 | console.log(chatResponse);
74 |
75 | // Get the current context
76 | const currentContext = client.getContext();
77 | console.log('\nCurrent Context:');
78 | console.log(currentContext);
79 |
80 | // Clear the context
81 | client.clearContext();
82 |
83 | // === Create Completion ===
84 |
85 | const completionOptions = {
86 | model: 'text-davinci-003',
87 | prompt: 'Once upon a time',
88 | max_tokens: 5,
89 | };
90 |
91 | const completion = await client.createCompletion(completionOptions);
92 | console.log('\nCompletion Response:');
93 | console.log(completion);
94 |
95 | // === Create Embedding ===
96 |
97 | const embeddingOptions = {
98 | model: 'text-embedding-ada-002',
99 | input: 'OpenAI is an AI research lab.',
100 | };
101 |
102 | const embedding = await client.createEmbedding(embeddingOptions);
103 | console.log('\nEmbedding Response:');
104 | console.log(embedding);
105 |
106 | // === Create Image ===
107 |
108 | const imageOptions: CreateImageOptions = {
109 | prompt: 'A sunset over the mountains',
110 | n: 1,
111 | size: '512x512',
112 | };
113 |
114 | const image = await client.createImage(imageOptions);
115 | console.log('\nImage Response:');
116 | console.log(image);
117 |
118 | // === Error Handling Example ===
119 |
120 | try {
121 | const invalidCompletion = await client.createCompletion({
122 | model: 'invalid-model',
123 | prompt: 'Hello, world!',
124 | });
125 | } catch (error) {
126 | if (error instanceof OpenAIError) {
127 | console.error('\nHandled OpenAI Error:', error.message);
128 | } else {
129 | console.error('\nUnhandled Error:', error);
130 | }
131 | }
132 | } catch (error) {
133 | console.error('Error:', error);
134 | }
135 | })();
--------------------------------------------------------------------------------
/docs/types/types.FileContent.html:
--------------------------------------------------------------------------------
1 |
FileContent | OpenAI Enhanced SDK
2 |
--------------------------------------------------------------------------------
/docs/types/types.RetrieveModelResponse.html:
--------------------------------------------------------------------------------
1 | RetrieveModelResponse | OpenAI Enhanced SDKType Alias RetrieveModelResponse
RetrieveModelResponse: Model
2 |
--------------------------------------------------------------------------------
/docs/modules/openai_client.html:
--------------------------------------------------------------------------------
1 | openai-client | OpenAI Enhanced SDK
3 |
--------------------------------------------------------------------------------
/docs/hierarchy.html:
--------------------------------------------------------------------------------
1 | OpenAI Enhanced SDKOpenAI Enhanced SDK
Class Hierarchy
2 |
--------------------------------------------------------------------------------
/docs/types/types.LogLevel.html:
--------------------------------------------------------------------------------
1 | LogLevel | OpenAI Enhanced SDKLogLevel:
| "error"
| "warn"
| "info"
| "http"
| "verbose"
| "debug"
| "silly"
2 |
--------------------------------------------------------------------------------
/docs/modules/errors.html:
--------------------------------------------------------------------------------
1 | errors | OpenAI Enhanced SDK
7 |
--------------------------------------------------------------------------------
/docs/interfaces/types.ErrorResponse.html:
--------------------------------------------------------------------------------
1 | ErrorResponse | OpenAI Enhanced SDKinterface ErrorResponse { error?: { message?: string; }; } Properties
Optionalerror
error?: {
message?: string;
}
3 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [](https://www.npmjs.com/package/openai-enhanced-sdk)
2 | [](https://github.com/bgarciaoliveira/openai-enhanced-sdk/actions)
3 | [](LICENSE)
4 |
5 | # OpenAI TypeScript Enhanced SDK
6 |
7 | OpenAI Enhanced SDK is a **fully typed** TypeScript SDK that facilitates integration with the OpenAI API. It offers features that the official SDK does not have, such as context management for conversations, proxy support, automatic request retry mechanism, detailed logging system, among others.
8 | Additionally, the SDK implements most of the functionality provided by the OpenAI API.
9 | *Note: This is an unofficial SDK and is not affiliated with OpenAI.*
10 |
11 | ## Table of Contents
12 |
13 | - [Features](#features)
14 | - [Installation](#installation)
15 | - [Getting Started](#getting-started)
16 | - [Initialization](#initialization)
17 | - [Authentication](#authentication)
18 | - [Usage Examples](#usage-examples)
19 | - [Context Management](#context-management)
20 | - [Proxy Configuration](#proxy-configuration)
21 | - [List Models](#list-models)
22 | - [Create Completion](#create-completion)
23 | - [Create Chat Completion](#create-chat-completion)
24 | - [Create Embedding](#create-embedding)
25 | - [Create Image](#create-image)
26 | - [Error Handling](#error-handling)
27 | - [Configuration](#configuration)
28 | - [Logging](#logging)
29 | - [Testing](#testing)
30 | - [Documentation](#documentation)
31 | - [Contributing](#contributing)
32 | - [License](#license)
33 |
34 | ## Features
35 |
36 | - **Complete API Coverage**: Implements all major OpenAI API endpoints.
37 | - **Context Management**: Manage conversation context easily for chat completions.
38 | - **Streaming Support**: Supports streaming for completions and chat completions.
39 | - **Robust Error Handling**: Provides custom error classes for different error types.
40 | - **TypeScript Support**: Includes comprehensive type definitions.
41 | - **Logging**: Configurable logging using Winston.
42 | - **Retry Mechanism**: Built-in retry logic using `axios-retry`.
43 | - **Proxy Support**: Enhanced proxy configuration for flexible network setups.
44 | - **Extensible**: Easily extendable for future API endpoints.
45 |
46 | ## Installation
47 |
48 | Install the package via npm:
49 |
50 | ```bash
51 | npm install openai-enhanced-sdk
52 | ```
53 |
54 | ## Getting Started
55 |
56 | ### Initialization
57 | ```typescript
58 | import OpenAIClient from 'openai-enhanced-sdk';
59 | import { HttpsProxyAgent } from 'https-proxy-agent';
60 |
61 | const apiKey = process.env.OPENAI_API_KEY;
62 |
63 | // Proxy agent configuration
64 | const proxyAgent = new HttpsProxyAgent('http://proxy.example.com:8080');
65 |
66 | const client = new OpenAIClient(apiKey, {
67 | baseURL: 'https://api.openai.com/v1',
68 | timeout: 10000,
69 | proxyConfig: proxyAgent,
70 | axiosConfig: {
71 | headers: {
72 | 'Custom-Header': 'custom-value',
73 | },
74 | },
75 | axiosRetryConfig: {
76 | retries: 5,
77 | retryDelay: 2000,
78 | },
79 | loggingOptions: {
80 | logLevel: 'info',
81 | logToFile: true,
82 | logFilePath: 'logs/openai-client.log',
83 | },
84 | });
85 | ```
86 |
87 | ### Authentication
88 |
89 | Ensure you have your OpenAI API key stored securely, preferably in an environment variable:
90 |
91 | ```bash
92 | export OPENAI_API_KEY=your_api_key_here
93 | ```
94 |
95 | ## Usage Examples
96 |
97 | ### Context Management
98 |
99 | #### Add a single entry to context
100 |
101 | ```typescript
102 | client.addToContext({
103 | role: 'system',
104 | content: 'You are a helpful assistant.',
105 | });
106 | ```
107 |
108 | #### Add multiple entries to context
109 |
110 | ```typescript
111 | const contextEntries = [
112 | {
113 | role: 'user',
114 | content: 'Tell me a joke.',
115 | },
116 | {
117 | role: 'assistant',
118 | content: 'Why did the chicken cross the road? To get to the other side!',
119 | },
120 | ];
121 | client.addBatchToContext(contextEntries);
122 | ```
123 |
124 | #### Get current context
125 |
126 | ```typescript
127 | const context = client.getContext();
128 | console.log(context);
129 | ```
130 |
131 | #### Clear context
132 |
133 | ```typescript
134 | client.clearContext();
135 | ```
136 |
137 | #### Use context in chat completion
138 |
139 | ```typescript
140 | const response = await client.createChatCompletion({
141 | model: 'gpt-3.5-turbo',
142 | messages: [{ role: 'user', content: 'Do I need an umbrella today?' }],
143 | });
144 | console.log(response);
145 | ```
146 |
147 | ### Proxy Configuration
148 |
149 | #### Using Proxy with Custom HTTPS Agent
150 |
151 | ```typescript
152 | import { HttpsProxyAgent } from 'https-proxy-agent';
153 |
154 | const proxyAgent = new HttpsProxyAgent('http://localhost:8080');
155 |
156 | const proxyClient = new OpenAIClient(apiKey, {
157 | proxyConfig: proxyAgent,
158 | });
159 | ```
160 |
161 | ### List Models
162 |
163 | ```typescript
164 | const models = await client.listModels();
165 | console.log(models);
166 | ```
167 |
168 | ### Create Completion
169 |
170 | ```typescript
171 | const completion = await client.createCompletion({
172 | model: 'text-davinci-003',
173 | prompt: 'Once upon a time',
174 | max_tokens: 5,
175 | });
176 | console.log(completion);
177 | ```
178 |
179 | ### Create Chat Completion
180 |
181 | ```typescript
182 | const chatCompletion = await client.createChatCompletion({
183 | model: 'gpt-3.5-turbo',
184 | messages: [{ role: 'user', content: 'Hello, how are you?' }],
185 | });
186 | console.log(chatCompletion);
187 | ```
188 |
189 | ### Create Embedding
190 |
191 | ```typescript
192 | const embedding = await client.createEmbedding({
193 | model: 'text-embedding-ada-002',
194 | input: 'OpenAI is an AI research lab.',
195 | });
196 | console.log(embedding);
197 | ```
198 |
199 | ### Create Image
200 |
201 | ```typescript
202 | const image = await client.createImage({
203 | prompt: 'A sunset over the mountains',
204 | n: 1,
205 | size: '512x512',
206 | });
207 | console.log(image);
208 | ```
209 |
210 | ### Error Handling
211 |
212 | ```typescript
213 | try {
214 | const completion = await client.createCompletion({
215 | model: 'text-davinci-003',
216 | prompt: 'Hello, world!',
217 | });
218 | } catch (error) {
219 | if (error instanceof AuthenticationError) {
220 | console.error('Authentication Error:', error.message);
221 | } else if (error instanceof ValidationError) {
222 | console.error('Validation Error:', error.message);
223 | } else if (error instanceof RateLimitError) {
224 | console.error('Rate Limit Exceeded:', error.message);
225 | } else if (error instanceof APIError) {
226 | console.error('API Error:', error.message);
227 | } else {
228 | console.error('Unknown Error:', error);
229 | }
230 | }
231 | ```
232 |
233 | ## Configuration
234 |
235 | You can customize the client using the `OpenAIClientOptions` interface:
236 |
237 | ```typescript
238 | import { HttpsProxyAgent } from 'https-proxy-agent';
239 |
240 | const proxyAgent = new HttpsProxyAgent('http://localhost:8080');
241 |
242 | const client = new OpenAIClient(apiKey, {
243 | baseURL: 'https://api.openai.com/v1',
244 | timeout: 10000, // 10 seconds timeout
245 | proxyConfig: proxyAgent,
246 | loggingOptions: {
247 | logLevel: 'debug',
248 | logToFile: true,
249 | logFilePath: './logs/openai-sdk.log',
250 | },
251 | axiosRetryConfig: {
252 | retries: 5,
253 |     retryDelay: axiosRetry.exponentialDelay, // requires: import axiosRetry from 'axios-retry';
254 | },
255 | });
256 | ```
257 |
258 | ## Logging
259 |
260 | The SDK uses Winston for logging. You can configure logging levels and outputs:
261 |
262 | ```typescript
263 | loggingOptions: {
264 |   logLevel: 'info', // 'error' | 'warn' | 'info' | 'http' | 'verbose' | 'debug' | 'silly'
265 | logToFile: true,
266 | logFilePath: './logs/openai-sdk.log',
267 | }
268 | ```
269 |
270 | ## Testing
271 |
272 | The SDK includes comprehensive unit tests using Jest. To run the tests:
273 |
274 | ```bash
275 | npm run test
276 | ```
277 |
278 | ## Documentation
279 |
280 | For more detailed information, please refer to the [OpenAI Enhanced SDK Documentation](https://bgarciaoliveira.github.io/openai-enhanced-sdk/index.html).
281 |
282 | ## Contributing
283 |
284 | Contributions are welcome! Please open an issue or submit a pull request on GitHub.
285 |
286 | ## License
287 |
288 | This project is licensed under the MIT License.
289 |
290 | ---
291 |
292 | If you have any questions or need assistance, feel free to reach out!
--------------------------------------------------------------------------------
/docs/interfaces/types.EditChoice.html:
--------------------------------------------------------------------------------
1 | EditChoice | OpenAI Enhanced SDKinterface EditChoice { index: number; text: string; } Properties
4 |
--------------------------------------------------------------------------------
/docs/interfaces/types.ImageData.html:
--------------------------------------------------------------------------------
1 | ImageData | OpenAI Enhanced SDKinterface ImageData { b64_json?: string; url?: string; } Properties
Optionalb64_json
b64_json?: string
4 |
--------------------------------------------------------------------------------
/docs/interfaces/types.UploadFileOptions.html:
--------------------------------------------------------------------------------
1 | UploadFileOptions | OpenAI Enhanced SDKInterface UploadFileOptions
Properties
4 |
--------------------------------------------------------------------------------
/docs/interfaces/types.ImageResponse.html:
--------------------------------------------------------------------------------
1 | ImageResponse | OpenAI Enhanced SDK
4 |
--------------------------------------------------------------------------------
/docs/interfaces/types.ListModelsResponse.html:
--------------------------------------------------------------------------------
1 | ListModelsResponse | OpenAI Enhanced SDKInterface ListModelsResponse
Properties
4 |
--------------------------------------------------------------------------------
/docs/interfaces/types.ListFilesResponse.html:
--------------------------------------------------------------------------------
1 | ListFilesResponse | OpenAI Enhanced SDKInterface ListFilesResponse
Properties
4 |
--------------------------------------------------------------------------------
/tests/openai-client.test.ts:
--------------------------------------------------------------------------------
// tests/openai-client.test.ts
//
// Unit tests for OpenAIClient. All HTTP traffic is intercepted with nock,
// so no real OpenAI API calls are made.

import OpenAIClient from '../src/openai-client';
import { config } from 'dotenv';
import nock from 'nock';
import { HttpsProxyAgent } from 'https-proxy-agent';
// NOTE(review): the bare 'types' specifier resolves only via a tsconfig
// baseUrl/path alias — confirm; otherwise this should be '../src/types'.
import { ContextEntry, CreateChatCompletionOptions } from 'types';

// Load OPENAI_API_KEY from a local .env file when present (see .env.example).
config();

describe('OpenAIClient', () => {
  // Falls back to a dummy key: the API is fully mocked, so any key works.
  const apiKey = process.env.OPENAI_API_KEY || 'test-api-key';
  // Shared client for context tests; 'error' level keeps test output quiet.
  const client = new OpenAIClient(apiKey, {
    loggingOptions: { logLevel: 'error' },
  });

  beforeAll(() => {
    // Fail fast if any request is not matched by a nock interceptor.
    nock.disableNetConnect();
  });

  afterAll(() => {
    nock.enableNetConnect();
    // NOTE(review): interceptors are only cleaned here; a failing test can
    // leak a pending mock into later tests — consider afterEach(cleanAll).
    nock.cleanAll();
  });

  // === Proxy Configuration Tests ===

  // Only verifies that requests still succeed with a proxy agent configured;
  // it does not assert that the proxy itself was actually used.
  test('should use proxy configuration when provided', async () => {
    const proxyClient = new OpenAIClient(apiKey, {
      proxyConfig: new HttpsProxyAgent('http://localhost:8080'),
    });

    nock('https://api.openai.com')
      .get('/v1/models')
      .reply(200, { data: [], object: 'list' });

    const models = await proxyClient.listModels();
    expect(models).toEqual({ data: [], object: 'list' });
  });

  // === Context Management Tests ===

  test('should add a single entry to context', () => {
    const contextEntry: ContextEntry = {
      role: 'user',
      content: 'Hello, assistant!',
    };
    client.addToContext(contextEntry);
    const context = client.getContext();
    expect(context).toContainEqual(contextEntry);
  });

  test('should add multiple entries to context', () => {
    client.clearContext();
    const contextEntries: ContextEntry[] = [
      {
        role: 'system',
        content: 'You are a helpful assistant.',
      },
      {
        role: 'user',
        content: 'Tell me a joke.',
      },
    ];
    client.addBatchToContext(contextEntries);
    const context = client.getContext();
    expect(context).toEqual(contextEntries);
  });

  test('should throw error when adding invalid context entry', () => {
    const invalidEntry = {
      role: 'invalid',
      content: 'This should fail.',
    };
    expect(() => client.addToContext(invalidEntry as any)).toThrow(
      'Context entry must be an object with role ("system", "user", or "assistant") and content properties'
    );
  });

  test('should throw error when adding invalid batch context entries', () => {
    // First entry is valid; the batch must still be rejected as a whole.
    const invalidEntries = [
      {
        role: 'user',
        content: 'This is valid.',
      },
      {
        role: 'invalid',
        content: 'This should fail.',
      },
    ];
    expect(() => client.addBatchToContext(invalidEntries as any)).toThrow(
      'Context entry must be an object with role ("system", "user", or "assistant") and content properties'
    );
  });

  test('should clear context', () => {
    client.clearContext();
    const context = client.getContext();
    expect(context).toEqual([]);
  });

  // Verifies that stored context entries are prepended to the request's
  // own messages when calling createChatCompletion.
  test('should include context in createChatCompletion', async () => {
    client.clearContext();
    const contextEntries: ContextEntry[] = [
      {
        role: 'system',
        content: 'You are a helpful assistant.',
      },
      {
        role: 'user',
        content: 'What is the weather today?',
      },
    ];
    client.addBatchToContext(contextEntries);

    const options: CreateChatCompletionOptions = {
      model: 'gpt-3.5-turbo',
      messages: [{ role: 'user', content: 'Do I need an umbrella?' }],
    };

    const expectedMessages = [...contextEntries, ...options.messages];

    const mockResponse = {
      id: 'chat-completion-id',
      object: 'chat.completion',
      created: 1234567890,
      model: 'gpt-3.5-turbo',
      choices: [
        {
          index: 0,
          message: {
            role: 'assistant',
            content: 'Yes, you should take an umbrella today.',
          },
          finish_reason: 'stop',
        },
      ],
      usage: {
        prompt_tokens: 20,
        completion_tokens: 10,
        total_tokens: 30,
      },
    };

    // Mock the API call and capture the request payload
    let requestPayload: any;
    nock('https://api.openai.com')
      .post('/v1/chat/completions', (body) => {
        requestPayload = body;
        return true;
      })
      .reply(200, mockResponse);

    const response = await client.createChatCompletion(options);
    expect(response).toEqual(mockResponse);

    expect(requestPayload.messages).toEqual(expectedMessages);
  });

  // === Completion Tests ===

  test('should create a completion', async () => {
    const completionOptions = {
      model: 'text-davinci-003',
      prompt: 'Hello, world!',
    };

    const mockResponse = {
      id: 'completion-id',
      object: 'text_completion',
      created: 1234567890,
      model: 'text-davinci-003',
      choices: [
        {
          text: 'Hello to you too!',
          index: 0,
          logprobs: null,
          finish_reason: 'stop',
        },
      ],
      usage: {
        prompt_tokens: 5,
        completion_tokens: 5,
        total_tokens: 10,
      },
    };

    // Passing completionOptions as the nock body matcher also asserts the
    // exact request payload sent by the client.
    nock('https://api.openai.com')
      .post('/v1/completions', completionOptions)
      .reply(200, mockResponse);

    const response = await client.createCompletion(completionOptions);
    expect(response).toEqual(mockResponse);
  });

  // === Embedding Tests ===

  test('should create an embedding', async () => {
    const embeddingOptions = {
      model: 'text-embedding-ada-002',
      input: 'OpenAI is an AI research lab.',
    };

    const mockResponse = {
      object: 'list',
      data: [
        {
          object: 'embedding',
          embedding: [0.1, 0.2, 0.3],
          index: 0,
        },
      ],
      model: 'text-embedding-ada-002',
      usage: {
        prompt_tokens: 5,
        total_tokens: 5,
      },
    };

    nock('https://api.openai.com')
      .post('/v1/embeddings', embeddingOptions)
      .reply(200, mockResponse);

    const response = await client.createEmbedding(embeddingOptions);
    expect(response).toEqual(mockResponse);
  });

  // === Moderation Tests ===

  test('should create a moderation', async () => {
    const moderationOptions = {
      input: 'Some potentially unsafe content.',
    };

    const mockResponse = {
      id: 'modr-id',
      model: 'text-moderation-001',
      results: [
        {
          flagged: false,
          categories: {
            hate: false,
            'hate/threatening': false,
            harassment: false,
            'self-harm': false,
            sexual: false,
            'sexual/minors': false,
            violence: false,
            'violence/graphic': false,
          },
          category_scores: {
            hate: 0.0,
            'hate/threatening': 0.0,
            harassment: 0.0,
            'self-harm': 0.0,
            sexual: 0.0,
            'sexual/minors': 0.0,
            violence: 0.0,
            'violence/graphic': 0.0,
          },
        },
      ],
    };

    nock('https://api.openai.com')
      .post('/v1/moderations', moderationOptions)
      .reply(200, mockResponse);

    const response = await client.createModeration(moderationOptions);
    expect(response).toEqual(mockResponse);
  });

  // === Error Handling Tests ===

  test('should handle authentication errors', async () => {
    const completionOptions = {
      model: 'text-davinci-003',
      prompt: 'Hello, world!',
    };

    nock('https://api.openai.com')
      .post('/v1/completions', completionOptions)
      .reply(401, { error: { message: 'Unauthorized' } });

    await expect(client.createCompletion(completionOptions)).rejects.toThrow('Unauthorized');
  });

  test('should handle validation errors', async () => {
    const completionOptions = {
      model: 'invalid-model',
      prompt: 'Hello, world!',
    };

    nock('https://api.openai.com')
      .post('/v1/completions', completionOptions)
      .reply(400, { error: { message: 'Invalid model' } });

    await expect(client.createCompletion(completionOptions)).rejects.toThrow('Invalid model');
  });
});
--------------------------------------------------------------------------------
/docs/interfaces/types.ListFineTuneEventsResponse.html:
--------------------------------------------------------------------------------
1 | ListFineTuneEventsResponse | OpenAI Enhanced SDKInterface ListFineTuneEventsResponse
Properties
4 |
--------------------------------------------------------------------------------
/docs/interfaces/types.CreateModerationOptions.html:
--------------------------------------------------------------------------------
1 | CreateModerationOptions | OpenAI Enhanced SDKInterface CreateModerationOptions
interface CreateModerationOptions { input: string | string[]; model?: string; } Properties
input
input: string | string[]
Optionalmodel
model?: string
4 |
--------------------------------------------------------------------------------
/docs/interfaces/types.ChatMessage.html:
--------------------------------------------------------------------------------
1 | ChatMessage | OpenAI Enhanced SDKinterface ChatMessage { content: string; role: "system" | "user" | "assistant"; } Properties
role
role: "system" | "user" | "assistant"
4 |
--------------------------------------------------------------------------------
/docs/interfaces/types.ContextEntry.html:
--------------------------------------------------------------------------------
1 | ContextEntry | OpenAI Enhanced SDKinterface ContextEntry { content: string; role: "system" | "user" | "assistant"; } Properties
role
role: "system" | "user" | "assistant"
4 |
--------------------------------------------------------------------------------
/src/types.ts:
--------------------------------------------------------------------------------
1 | // src/types.ts
2 |
3 | import { AxiosRequestConfig } from 'axios';
4 | import { Format } from 'logform';
5 |
//
// Logging Configuration
//

/** Winston logging levels supported by the SDK, from most to least severe. */
export type LogLevel = 'error' | 'warn' | 'info' | 'http' | 'verbose' | 'debug' | 'silly';

/** Options controlling the SDK's internal Winston logger. */
export interface LoggingOptions {
  /**
   * Logging level ('error', 'warn', 'info', 'http', 'verbose', 'debug', 'silly').
   */
  logLevel?: LogLevel;

  /**
   * Enable logging to a file.
   */
  logToFile?: boolean;

  /**
   * Path to the log file.
   * Presumably only consulted when `logToFile` is true — confirm in the client.
   */
  logFilePath?: string;

  /**
   * Custom logging format for Winston logger.
   */
  logFormat?: Format;
}
33 |
34 | //
35 | // OpenAI Client Options
36 | //
37 |
38 | export interface OpenAIClientOptions {
39 | /**
40 | * Base URL for the API.
41 | */
42 | baseURL?: string;
43 |
44 | /**
45 | * Request timeout in milliseconds.
46 | */
47 | timeout?: number;
48 |
49 | /**
50 | * Proxy configuration (deprecated in favor of proxyConfig).
51 | */
52 | proxy?: AxiosRequestConfig['proxy'];
53 |
54 | /**
55 | * Enhanced proxy configuration using an HTTPS agent.
56 | */
57 | proxyConfig?: any;
58 |
59 | /**
60 | * Logging configuration options.
61 | */
62 | loggingOptions?: LoggingOptions;
63 |
64 | /**
65 | * Custom axios-retry configuration (plain object type).
66 | */
67 | axiosRetryConfig?: Record;
68 |
69 | /**
70 | * Custom Axios configuration options.
71 | */
72 | axiosConfig?: AxiosRequestConfig;
73 | }
74 |
//
// Context Management
//

/** A single chat message stored in the client's conversation context. */
export interface ContextEntry {
  role: 'system' | 'user' | 'assistant';
  content: string;
}
83 |
//
// Models
//

/** A model object as returned by the OpenAI models endpoints. */
export interface Model {
  id: string;
  object: string;
  created: number; // Unix timestamp
  owned_by: string;
  permission: any[];
  root: string;
  parent: string | null;
}

/** Response shape of `GET /v1/models` (the tests mock `object: 'list'`). */
export interface ListModelsResponse {
  data: Model[];
  object: string;
}

/** Response of `GET /v1/models/{id}` — a single model object. */
export type RetrieveModelResponse = Model;
104 |
105 | //
106 | // Completions
107 | //
108 |
109 | export interface CompletionChoice {
110 | text: string;
111 | index: number;
112 | logprobs: any | null;
113 | finish_reason: string;
114 | }
115 |
116 | export interface CompletionResponse {
117 | id: string;
118 | object: string;
119 | created: number;
120 | model: string;
121 | choices: CompletionChoice[];
122 | usage: {
123 | prompt_tokens: number;
124 | completion_tokens: number;
125 | total_tokens: number;
126 | };
127 | }
128 |
129 | // Parameters for createCompletion
130 | export interface CreateCompletionOptions {
131 | model: string;
132 | prompt?: string | string[];
133 | suffix?: string;
134 | max_tokens?: number;
135 | temperature?: number;
136 | top_p?: number;
137 | n?: number;
138 | stream?: boolean;
139 | logprobs?: number | null;
140 | echo?: boolean;
141 | stop?: string | string[] | null;
142 | presence_penalty?: number;
143 | frequency_penalty?: number;
144 | best_of?: number;
145 | logit_bias?: Record;
146 | user?: string;
147 | }
148 |
149 | // Streaming Completion Response
150 | export interface CompletionStreamResponse {
151 | id: string;
152 | object: 'text_completion';
153 | created: number;
154 | model: string;
155 | choices: Array<{
156 | text: string;
157 | index: number;
158 | logprobs: any | null;
159 | finish_reason: string | null;
160 | }>;
161 | }
162 |
163 | //
164 | // Chat Completions
165 | //
166 |
167 | export interface ChatMessage {
168 | role: 'system' | 'user' | 'assistant';
169 | content: string;
170 | }
171 |
172 | export interface ChatCompletionChoice {
173 | index: number;
174 | message?: ChatMessage;
175 | delta?: Partial;
176 | finish_reason: string | null;
177 | }
178 |
179 | export interface ChatCompletionResponse {
180 | id: string;
181 | object: string;
182 | created: number;
183 | model: string;
184 | choices: ChatCompletionChoice[];
185 | usage: {
186 | prompt_tokens: number;
187 | completion_tokens: number;
188 | total_tokens: number;
189 | };
190 | }
191 |
192 | // Parameters for createChatCompletion
193 | export interface CreateChatCompletionOptions {
194 | model: string;
195 | messages: ChatMessage[];
196 | max_tokens?: number;
197 | temperature?: number;
198 | top_p?: number;
199 | n?: number;
200 | stream?: boolean;
201 | stop?: string | string[];
202 | presence_penalty?: number;
203 | frequency_penalty?: number;
204 | logit_bias?: Record;
205 | user?: string;
206 | }
207 |
208 | // Streaming Chat Completion Response
209 | export interface ChatCompletionStreamResponse {
210 | id: string;
211 | object: 'chat.completion.chunk';
212 | created: number;
213 | model: string;
214 | choices: Array<{
215 | delta: Partial;
216 | index: number;
217 | finish_reason: string | null;
218 | }>;
219 | }
220 |
//
// Embeddings
//

/** One embedding vector in an embeddings response. */
export interface EmbeddingData {
  object: string;
  embedding: number[];
  index: number;
}

/** Response shape of `POST /v1/embeddings`. */
export interface EmbeddingResponse {
  object: string;
  data: EmbeddingData[];
  model: string;
  usage: {
    prompt_tokens: number;
    total_tokens: number;
  };
}

// Parameters for createEmbedding
export interface CreateEmbeddingOptions {
  model: string;
  input: string | string[];
  user?: string;
}
247 |
248 | //
249 | // Moderations
250 | //
251 |
252 | export interface ModerationResult {
253 | flagged: boolean;
254 | categories: Record;
255 | category_scores: Record;
256 | }
257 |
258 | export interface ModerationResponse {
259 | id: string;
260 | model: string;
261 | results: ModerationResult[];
262 | }
263 |
264 | // Parameters for createModeration
265 | export interface CreateModerationOptions {
266 | input: string | string[];
267 | model?: string;
268 | }
269 |
//
// Edits
//

/** One generated edit choice. */
export interface EditChoice {
  text: string;
  index: number;
}

/** Response shape of `POST /v1/edits`. */
export interface EditResponse {
  object: string;
  created: number;
  choices: EditChoice[];
  usage: {
    prompt_tokens: number;
    completion_tokens: number;
    total_tokens: number;
  };
}

// Parameters for createEdit
export interface CreateEditOptions {
  model: string;
  input: string;
  instruction: string;
  n?: number;
  temperature?: number;
  top_p?: number;
}
299 |
//
// Images
//

/** One generated image: a hosted URL or base64 JSON, per `response_format`. */
export interface ImageData {
  url?: string;
  b64_json?: string;
}

/** Response shape of the image generation/edit/variation endpoints. */
export interface ImageResponse {
  created: number;
  data: ImageData[];
}

// Parameters for image methods
export interface CreateImageOptions {
  prompt: string;
  n?: number;
  size?: '256x256' | '512x512' | '1024x1024';
  response_format?: 'url' | 'b64_json';
  user?: string;
}

/** Options for image edits; `image` and `mask` are local file paths. */
export interface CreateImageEditOptions {
  image: string; // File path
  mask?: string; // File path
  prompt: string;
  n?: number;
  size?: '256x256' | '512x512' | '1024x1024';
  response_format?: 'url' | 'b64_json';
  user?: string;
}

/** Options for image variations; `image` is a local file path. */
export interface CreateImageVariationOptions {
  image: string; // File path
  n?: number;
  size?: '256x256' | '512x512' | '1024x1024';
  response_format?: 'url' | 'b64_json';
  user?: string;
}
340 |
//
// Audio
//

// Parameters for transcribeAudio and translateAudio
/** Shared options for audio transcription and translation requests. */
export interface AudioOptions {
  model?: string;
  prompt?: string;
  response_format?: 'json' | 'text' | 'srt' | 'verbose_json' | 'vtt';
  temperature?: number;
  // Language hint — presumably only meaningful for transcription; confirm
  // against the OpenAI audio API docs.
  language?: string;
}
353 |
//
// Files
//

/** Metadata for an uploaded file. */
export interface FileObject {
  id: string;
  object: string;
  bytes: number;
  created_at: number; // Unix timestamp
  filename: string;
  purpose: string;
}

/** Response shape of `GET /v1/files`. */
export interface ListFilesResponse {
  data: FileObject[];
  object: string;
}

// File Content
/** Raw file bytes as returned by the file-content endpoint. */
export type FileContent = Buffer;

// Parameters for uploadFile
export interface UploadFileOptions {
  filePath: string; // local path of the file to upload
  purpose: string; // e.g. 'fine-tune' — confirm allowed values against the API docs
}
380 |
//
// Fine-Tunes
//

/** A fine-tune job object as returned by the fine-tunes endpoints. */
export interface FineTuneResponse {
  id: string;
  object: string;
  model: string;
  created_at: number;
  events: any[];
  fine_tuned_model: string | null; // null while no fine-tuned model exists yet
  hyperparams: any;
  organization_id: string;
  result_files: any[];
  status: string;
  validation_files: any[];
  training_files: any[];
  updated_at: number;
}

/** One event in a fine-tune job's event stream. */
export interface FineTuneEvent {
  object: string;
  created_at: number;
  level: string;
  message: string;
}

// Response for listing Fine-Tune events
export interface ListFineTuneEventsResponse {
  object: string;
  data: FineTuneEvent[];
}

// Parameters for createFineTune
export interface CreateFineTuneOptions {
  training_file: string; // id of an uploaded training file
  validation_file?: string;
  model?: string;
  n_epochs?: number;
  batch_size?: number;
  learning_rate_multiplier?: number;
  prompt_loss_weight?: number;
  compute_classification_metrics?: boolean;
  classification_n_classes?: number;
  classification_positive_class?: string;
  classification_betas?: number[];
  suffix?: string;
}
429 |
/** Error envelope returned by the OpenAI API on failed requests. */
export interface ErrorResponse {
  error?: {
    message?: string;
  };
}
435 |
--------------------------------------------------------------------------------
/docs/interfaces/types.EmbeddingData.html:
--------------------------------------------------------------------------------
1 | EmbeddingData | OpenAI Enhanced SDK Properties
embedding
embedding: number[]
5 |
--------------------------------------------------------------------------------
/docs/interfaces/types.ModerationResponse.html:
--------------------------------------------------------------------------------
1 | ModerationResponse | OpenAI Enhanced SDKInterface ModerationResponse
Properties
5 |
--------------------------------------------------------------------------------
/docs/interfaces/types.CreateEmbeddingOptions.html:
--------------------------------------------------------------------------------
1 | CreateEmbeddingOptions | OpenAI Enhanced SDKInterface CreateEmbeddingOptions
interface CreateEmbeddingOptions { input: string | string[]; model: string; user?: string; } Properties
input
input: string | string[]
Optionaluser
user?: string
5 |
--------------------------------------------------------------------------------
/docs/interfaces/types.ModerationResult.html:
--------------------------------------------------------------------------------
1 | ModerationResult | OpenAI Enhanced SDKInterface ModerationResult
Properties
categories
categories: Record<string, boolean>
category_scores
category_scores: Record<string, number>
5 |
--------------------------------------------------------------------------------