├── .env.example
├── .gitignore
├── .npmignore
├── README.md
├── babel.config.js
├── examples
│   └── web-application.png
├── package-lock.json
├── package.json
├── src
│   ├── cli.ts
│   ├── complete-text.ts
│   ├── constants.ts
│   ├── main.ts
│   └── models
│       └── cli-options.ts
└── tsconfig.json

/.env.example:
--------------------------------------------------------------------------------
OPENAI_KEY=
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
.env
node_modules
dist
--------------------------------------------------------------------------------
/.npmignore:
--------------------------------------------------------------------------------
node_modules
src
.env
tests
babel.config.js
tsconfig.json
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# GPT Text to Design Diagram

Generate a design diagram image (PNG, SVG, or PDF) from a plain-text description of a software system or other set of entities, using OpenAI GPT models.

## Installation

Install from npm with `npm install gpt-text-to-diagram`.

## Usage

1. Set the `OPENAI_KEY` environment variable (or put it in a `.env` file, as in `.env.example`), or pass the key to the command with the `-k` flag.
2. Run the `gpt-text-to-diagram` command with the following options:

```bash
Options:
  -V, --version                    output the version number
  -p, --prompt <prompt>            Required: Software system description to create the design from.
  -k, --key <key>                  Optional: OpenAI API Key. Must be passed via the CLI or the OPENAI_KEY environment variable.
  -o, --output-file <outputFile>   Optional: Output filename template. Must have an extension of png, pdf, or svg. (default: "diagram.png")
  -m, --model <model>              Optional: OpenAI completion model to use. (default: "text-davinci-003")
  -c, --max-tokens <maxTokens>     Optional: Max tokens to use when generating the response. (default: 500)
  -t, --temperature <temperature>  Optional: The temperature parameter to use for the GPT model generation. (default: 0.2)
  -h, --help                       Display helpful information about the CLI.
```

## Example

Running the command:

```bash
gpt-text-to-diagram -p "Entities: Person, Frontend, Backend, Database. Person interacts with the frontend. The frontend sends requests to the backend. The backend performs CRUD operations on data in the database." -o test.png -t 0.8 -c 250
```

generates the following design diagram:

![Example design diagram of a web application](./examples/web-application.png)
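
## How it works

Under the hood, the CLI asks the model to reply only with mermaid markdown and then renders that markdown to an image with `markdown-mermaid-exporter` (see `src/main.ts` and `src/complete-text.ts`). The sketch below is a rough illustration of that flow, not a supported programmatic API: the description string, shortened prompt, and output filename are stand-ins for the CLI options, and the import path assumes a checkout of this repository rather than the published npm package.

```typescript
import { convertMarkdownMermaidToImage } from 'markdown-mermaid-exporter';
import { completeText } from './src/complete-text';

(async () => {
  // Illustrative stand-in for the -p option.
  const description = 'Person interacts with the frontend. The frontend calls the backend.';

  // Ask the model to complete a prompt that ends with an open ```mermaid fence, so the
  // response continues the mermaid block (the full prompt template lives in src/main.ts).
  const generated = await completeText(
    `Return only mermaid markdown for a design diagram.\n\nInput: ${description}\n\nMarkdown Code:\n\`\`\`mermaid\n`,
  );

  // Re-attach the opening fence and render the markdown to an image file.
  await convertMarkdownMermaidToImage('```mermaid\n' + generated, 'diagram.png');
})();
```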
--------------------------------------------------------------------------------
/babel.config.js:
--------------------------------------------------------------------------------
module.exports = {
  presets: [['@babel/preset-env', { targets: { node: 'current' } }], '@babel/preset-typescript'],
};
--------------------------------------------------------------------------------
/examples/web-application.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jakecyr/gpt-text-to-diagram/4dd4b972ed6451eb2560a41b249586cb62388527/examples/web-application.png
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
{
  "name": "gpt-text-to-diagram",
  "version": "1.0.2",
  "description": "Generate a design diagram image (PNG, SVG, or PDF) from a plain-text description of a software system or other set of entities, using OpenAI GPT models.",
  "main": "dist/src/main.js",
  "bin": "dist/src/main.js",
  "author": "Jake Cyr",
  "license": "ISC",
  "scripts": {
    "start": "node dist/src/main.js",
    "test": "node --experimental-vm-modules node_modules/jest/bin/jest.js",
    "build": "tsc"
  },
  "keywords": [
    "chatgpt",
    "machine-learning",
    "openai",
    "gpt",
    "design diagram",
    "software"
  ],
  "dependencies": {
    "axios": "^1.4.0",
    "commander": "^10.0.1",
    "dotenv": "^16.1.4",
    "markdown-mermaid-exporter": "^1.0.1",
    "openai": "^3.2.1"
  },
  "devDependencies": {
    "@babel/plugin-transform-modules-commonjs": "^7.22.5",
    "@babel/preset-env": "^7.22.5",
    "@babel/preset-typescript": "^7.22.5",
    "@types/jest": "^29.5.2",
    "jest": "^29.5.0",
    "typescript": "^5.1.3"
  }
}
--------------------------------------------------------------------------------
/src/cli.ts:
--------------------------------------------------------------------------------
import { OptionValues, program as commander } from 'commander';
import packageJSON from '../package.json';
import {
  DEFAULT_MAX_TOKENS,
  DEFAULT_MODEL,
  DEFAULT_OUTPUT_FILE,
  DEFAULT_TEMPERATURE,
  ENV_OPENAI_API_KEY,
} from './constants';
import { CLIOptions } from './models/cli-options';

export function parseCLIOptions(): CLIOptions {
  commander
    .version(packageJSON.version)
    .description('Generate a design diagram from a description of a software system.')
    .requiredOption('-p, --prompt <prompt>', 'Software system description to create the design from.')
    .option('-k, --key <key>', 'OpenAI API Key.')
    .option(
      '-o, --output-file <outputFile>',
      'Output filename template. Must have an extension of png, pdf, or svg.',
      DEFAULT_OUTPUT_FILE,
    )
    .option('-m, --model <model>', 'OpenAI completion model to use.', DEFAULT_MODEL)
    .option(
      '-c, --max-tokens <maxTokens>',
      'Max tokens to use when generating the response.',
      DEFAULT_MAX_TOKENS + '',
    )
    .option(
      '-t, --temperature <temperature>',
      'The temperature parameter to use for the GPT model generation.',
      DEFAULT_TEMPERATURE + '',
    );

  const options: OptionValues = commander.parse().opts();

  validateRawCLIOptions(options);

  return {
    prompt: options.prompt,
    key: options.key,
    outputFile: options.outputFile,
    model: options.model,
    maxTokens: parseInt(options.maxTokens),
    temperature: parseFloat(options.temperature),
  };
}

function validateRawCLIOptions(options: OptionValues) {
  const outputFileExtension = options.outputFile.slice(-3);
  const temperature = parseFloat(options.temperature) || DEFAULT_TEMPERATURE;
  const maxTokens = parseInt(options.maxTokens) || DEFAULT_MAX_TOKENS;
  const openAIKey = options.key || ENV_OPENAI_API_KEY;

  if (!openAIKey) {
    console.error(
      `Missing OpenAI API key. Set the environment variable OPENAI_KEY or pass the key into the command with the '-k' flag.`,
    );
    process.exit(1);
  }

  if (!['png', 'svg', 'pdf'].includes(outputFileExtension)) {
    console.error(
      `Invalid output file extension '${outputFileExtension}'. Expected: png, svg, or pdf.`,
    );
    process.exit(1);
  }

  if (temperature < 0 || temperature > 1) {
    console.error(
      `Invalid temperature value. Expected value to be >=0 and <=1, but received ${temperature}.`,
    );
    process.exit(1);
  }

  if (maxTokens < 0) {
    console.error(`Invalid max tokens value. Expected value to be >0, but received ${maxTokens}.`);
    process.exit(1);
  }
}
--------------------------------------------------------------------------------
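For reference, the README example invocation parses into a `CLIOptions` value along the lines of this sketch. It is illustrative only: the key is a placeholder (omit `-k` and the `OPENAI_KEY` environment variable is used instead), the prompt is truncated for brevity, and the commented defaults come from `src/constants.ts`.

```typescript
import { CLIOptions } from './models/cli-options';

// Illustrative result of parseCLIOptions() for:
//   gpt-text-to-diagram -p "Entities: ..." -o test.png -t 0.8 -c 250 -k YOUR_OPENAI_KEY
const parsed: CLIOptions = {
  prompt: 'Entities: Person, Frontend, Backend, Database. ...',
  key: 'YOUR_OPENAI_KEY',    // placeholder; falls back to process.env.OPENAI_KEY when -k is omitted
  outputFile: 'test.png',
  model: 'text-davinci-003', // DEFAULT_MODEL, since -m was not passed
  maxTokens: 250,            // parsed from the -c string value
  temperature: 0.8,          // parsed from the -t string value
};
```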
/src/complete-text.ts:
--------------------------------------------------------------------------------
import { Configuration, OpenAIApi } from 'openai';
import {
  ENV_OPENAI_API_KEY,
  DEFAULT_MODEL,
  DEFAULT_MAX_TOKENS,
  DEFAULT_TEMPERATURE,
} from './constants';
import { AxiosError } from 'axios';

export async function completeText(
  prompt: string,
  openAiApiKey: string = ENV_OPENAI_API_KEY,
  model: string = DEFAULT_MODEL,
  max_tokens: number = DEFAULT_MAX_TOKENS,
  temperature: number = DEFAULT_TEMPERATURE,
): Promise<string | undefined> {
  const configuration = new Configuration({
    apiKey: openAiApiKey,
  });

  const openai = new OpenAIApi(configuration);

  try {
    const completion = await openai.createCompletion({
      model,
      max_tokens,
      temperature,
      prompt,
    });

    return completion.data.choices[0].text;
  } catch (e) {
    if (e?.isAxiosError) {
      const axiosError: AxiosError = e;

      if (axiosError.response.status === 401) {
        console.error(
          `Error requesting completion from OpenAI API with status ${axiosError.response.status}. Is your API key correct?`,
        );
      } else {
        console.error(
          `Error requesting completion from OpenAI API with status ${
            axiosError.response.status
          }.\n${JSON.stringify(axiosError.response.data)}`,
        );
      }

      process.exit(1);
    }
  }
}
--------------------------------------------------------------------------------
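A minimal usage sketch for `completeText`, passing each parameter explicitly instead of relying on the defaults from `src/constants.ts`; the prompt text, token limit, and temperature here are arbitrary illustration values.

```typescript
import { completeText } from './complete-text';

(async () => {
  const text = await completeText(
    'Reply with a single short sentence.',
    process.env.OPENAI_KEY, // or pass a key string directly
    'text-davinci-003',     // model
    64,                     // max_tokens
    0,                      // temperature
  );

  console.log(text);
})();
```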
/src/constants.ts:
--------------------------------------------------------------------------------
import { config } from 'dotenv';

config();

export const DEFAULT_MODEL: string = 'text-davinci-003';
export const DEFAULT_OUTPUT_FILE: string = 'diagram.png';
export const DEFAULT_MAX_TOKENS: number = 500;
export const DEFAULT_TEMPERATURE: number = 0.2;
export const ENV_OPENAI_API_KEY = process.env.OPENAI_KEY;
--------------------------------------------------------------------------------
/src/main.ts:
--------------------------------------------------------------------------------
#!/usr/bin/env node

import { convertMarkdownMermaidToImage } from 'markdown-mermaid-exporter';
import { parseCLIOptions } from './cli';
import { CLIOptions } from './models/cli-options';
import { completeText } from './complete-text';

(async () => {
  const options: CLIOptions = parseCLIOptions();

  const promptForCompletion = `
You are a mermaid markdown generation system.
Return only markdown code of a design diagram in mermaid markdown.
Do not respond with any other text.

Input: ${options.prompt}

Markdown Code:
\`\`\`mermaid
`;

  const generatedMarkdown = await completeText(
    promptForCompletion,
    options.key,
    options.model,
    options.maxTokens,
    options.temperature,
  );

  const formattedMarkdown = '```mermaid\n' + generatedMarkdown;

  await convertMarkdownMermaidToImage(formattedMarkdown, options.outputFile);
})();
--------------------------------------------------------------------------------
/src/models/cli-options.ts:
--------------------------------------------------------------------------------
export interface CLIOptions {
  prompt: string;
  key: string;
  outputFile: `${string}.${'svg' | 'png' | 'pdf'}`;
  model: string;
  maxTokens: number;
  temperature: number;
}
--------------------------------------------------------------------------------
/tsconfig.json:
--------------------------------------------------------------------------------
{
  "compilerOptions": {
    "target": "es6",
    "module": "nodenext",
    "sourceMap": true,
    "declaration": true,
    "outDir": "./dist",
    "skipLibCheck": true,
    "esModuleInterop": true,
    "resolveJsonModule": true
  },
  "include": ["src", "test"],
  "exclude": ["node_modules"]
}
--------------------------------------------------------------------------------