├── .devcontainer └── devcontainer.json ├── .env.local.example ├── .gitignore ├── .replit ├── .vscode ├── extensions.json └── launch.json ├── LICENSE ├── README.md ├── app ├── api │ ├── assistant │ │ ├── assistant-setup.md │ │ └── route.ts │ ├── chat-with-functions-2 │ │ └── route.ts │ ├── chat-with-functions │ │ └── route.ts │ ├── chat-with-tools │ │ └── route.ts │ ├── chat-with-vision │ │ └── route.ts │ ├── chat │ │ └── route.ts │ ├── completion │ │ └── route.ts │ └── spell-check │ │ └── route.ts ├── assistant │ └── page.tsx ├── basic │ └── page.tsx ├── completion │ └── page.tsx ├── favicon.ico ├── function-calling │ └── page.tsx ├── globals.css ├── layout.tsx ├── map-demo │ └── page.tsx ├── page.tsx ├── server-components │ ├── page.tsx │ └── tokens │ │ └── page.tsx ├── spell-check │ └── page.tsx ├── stream-react-response │ ├── action.tsx │ ├── chat.tsx │ ├── layout.tsx │ └── page.tsx ├── tool-calling │ └── page.tsx ├── ui-demo │ ├── page.tsx │ └── style.css └── vision │ └── page.tsx ├── components.json ├── components ├── form │ └── index.tsx ├── home │ └── index.tsx ├── map │ └── map.tsx ├── sidebar │ └── index.tsx └── ui │ ├── button.tsx │ └── input.tsx ├── jest.config.js ├── lib ├── parseStreamingJson.test.ts ├── parseStreamingJson.ts └── utils.ts ├── next.config.js ├── package-lock.json ├── package.json ├── pnpm-lock.yaml ├── postcss.config.js ├── public ├── logo.svg ├── pin-blue.svg └── pin-red.svg ├── replit.nix ├── tailwind.config.js ├── tailwind.config.ts ├── tsconfig.json └── yarn.lock /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | // For format details, see https://aka.ms/devcontainer.json. For config options, see the 2 | // README at: https://github.com/devcontainers/templates/tree/main/src/universal 3 | { 4 | "name": "Default Linux Universal", 5 | // Or use a Dockerfile or Docker Compose file. 
More info: https://containers.dev/guide/dockerfile 6 | "image": "mcr.microsoft.com/devcontainers/universal:2-linux", 7 | "features": { 8 | "ghcr.io/stuartleeks/dev-container-features/shell-history:0": {} 9 | }, 10 | "customizations": { 11 | "vscode": { 12 | "extensions": [ 13 | "GitHub.copilot", 14 | "bradlc.vscode-tailwindcss", 15 | "eamodio.gitlens", 16 | "ms-toolsai.jupyter", 17 | "ms-python.python" 18 | ] 19 | } 20 | } 21 | 22 | // Features to add to the dev container. More info: https://containers.dev/features. 23 | // "features": {}, 24 | 25 | // Use 'forwardPorts' to make a list of ports inside the container available locally. 26 | // "forwardPorts": [], 27 | 28 | // Use 'postCreateCommand' to run commands after the container is created. 29 | // "postCreateCommand": "uname -a", 30 | 31 | // Configure tool-specific properties. 32 | // "customizations": {}, 33 | 34 | // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root. 35 | // "remoteUser": "root" 36 | } 37 | -------------------------------------------------------------------------------- /.env.local.example: -------------------------------------------------------------------------------- 1 | # You must first activate a Billing Account here: https://platform.openai.com/account/billing/overview 2 | # Then get your OpenAI API Key here: https://platform.openai.com/account/api-keys 3 | OPENAI_API_KEY=XXXXXXXX 4 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files. 
2 | 3 | # dependencies 4 | /node_modules 5 | /.pnp 6 | .pnp.js 7 | 8 | # testing 9 | /coverage 10 | 11 | # next.js 12 | /.next/ 13 | /out/ 14 | 15 | # production 16 | /build 17 | 18 | # misc 19 | .DS_Store 20 | *.pem 21 | 22 | # debug 23 | npm-debug.log* 24 | yarn-debug.log* 25 | yarn-error.log* 26 | 27 | # local env files 28 | .env*.local 29 | .env 30 | 31 | # vercel 32 | .vercel 33 | 34 | # typescript 35 | *.tsbuildinfo 36 | next-env.d.ts 37 | -------------------------------------------------------------------------------- /.replit: -------------------------------------------------------------------------------- 1 | # run = "tsx index.ts" 2 | # entrypoint = "index.ts" 3 | # hidden = [".config", "package-lock.json", "tsconfig.json"] 4 | # onBoot = "npm install -g pnpm@8.6.3" 5 | 6 | # run = "npm run dev" 7 | run = "pnpm dev" 8 | entrypoint = "index.js" 9 | modules = ["nodejs-20:v9-20231020-a3526c9"] 10 | hidden = [".config", "package-lock.json", ".next", ".swc"] 11 | 12 | 13 | [packager] 14 | language = "nodejs" 15 | [packager.features] 16 | enabledForHosting = false 17 | packageSearch = true 18 | guessImports = true 19 | 20 | [nix] 21 | # channel = "stable-22_11" 22 | channel = "stable-23_05" 23 | 24 | [env] 25 | XDG_CONFIG_HOME = "$REPL_HOME/.config" 26 | PATH = "$REPL_HOME/node_modules/.bin:$REPL_HOME/.config/npm/node_global/bin" 27 | npm_config_prefix = "$REPL_HOME/.config/npm/node_global" 28 | 29 | [gitHubImport] 30 | requiredFiles = [".replit", "replit.nix", ".config"] 31 | 32 | [languages] 33 | [languages.typescript] 34 | pattern = "**/{*.ts,*.js,*.tsx,*.jsx,*.json}" 35 | [languages.typescript.languageServer] 36 | start = "typescript-language-server --stdio" 37 | 38 | [deployment] 39 | run = ["tsx", "index.ts"] 40 | deploymentTarget = "cloudrun" 41 | ignorePorts = false 42 | -------------------------------------------------------------------------------- /.vscode/extensions.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "recommendations": [ 3 | "bradlc.vscode-tailwindcss", 4 | "github.copilot", 5 | "eamodio.gitlens", 6 | "ms-toolsai.jupyter", 7 | "ms-python.python" 8 | ] 9 | } -------------------------------------------------------------------------------- /.vscode/launch.json: -------------------------------------------------------------------------------- 1 | { 2 | // Use IntelliSense to learn about possible attributes. 3 | // Hover to view descriptions of existing attributes. 4 | // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 5 | "version": "0.2.0", 6 | "configurations": [ 7 | { 8 | "type": "node", 9 | "name": "vscode-jest-tests.v2", 10 | "request": "launch", 11 | "args": [ 12 | "--runInBand", 13 | "--watchAll=false", 14 | "--testNamePattern", 15 | // "${jest.testNamePattern}", 16 | "--runTestsByPath", 17 | // "${jest.testFile}" 18 | "${file}" 19 | ], 20 | "cwd": "${workspaceFolder}", 21 | "console": "integratedTerminal", 22 | "internalConsoleOptions": "neverOpen", 23 | "disableOptimisticBPs": true, 24 | "program": "${workspaceFolder}/node_modules/.bin/jest", 25 | "windows": { 26 | "program": "${workspaceFolder}/node_modules/jest/bin/jest" 27 | } 28 | } 29 | ] 30 | } -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Glavin Wiechert 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | 
The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Alvea - Revolutionizing User Interaction with AI 2 | 3 | | 👉 [**Demo Video** 🎥](https://www.loom.com/share/0d057eabb1bb430d8902bf7fdc1f3911?sid=1205e6cf-7e68-49e5-9ac7-4136abd5926a) | 4 | | --- | 5 | | [image](https://www.loom.com/share/0d057eabb1bb430d8902bf7fdc1f3911?t=142&sid=163c6712-15d4-42f2-898b-165903617bdb) | 6 | 7 | | About Alvea | 🥇 1st Place Winning Team | 8 | | --- | --- | 9 | | **Alvea is an innovative application designed to enhance productivity and user experience by leveraging generative user interfaces.**

Developed during [Generative UI hackathon](https://partiful.com/e/1antxX3cgLArJFGRNKg4) at [AGI House SF](https://agihouse.ai/), Alvea addresses the limitations of existing AI chat applications by offering dynamic, just-in-time user interfaces that adapt to the user's tasks and needs. | image | 10 | 11 | ## The Problem 12 | 13 | Existing AI chat apps, while boosting productivity, fall short in delivering optimal collaborative and informational experiences due to their reliance on text-based communication. 14 | 15 | ## Our Solution 16 | 17 | Alvea emerges as a solution to this problem by integrating AI to develop dynamically rendering user interfaces that are interactive and tailored to the user's current task. 18 | 19 | ### Key Features 20 | 21 | 24 | 25 | | Feature | Description | Alvea🌟 | Perplexity Copilot | ChatGPT | Specialized Task Apps | 26 | |---------|-------------|-------|---------------------|---------|-----------------------| 27 | | Natural Language Understanding & Response Generation | Effectively understands and generates human-like text responses to user queries. | ✅ | ✅ | ✅ | ❌ | 28 | | Personalized Experience | Tailors experiences based on individual user preferences and tasks. | ✅ | ✅ | ✅ | ❌ | 29 | | Interactive UIs | Offers engaging and adaptable interfaces tailored to user tasks. | ✅ | ✅ | ❌ | ✅ | 30 | | Efficient Data Collection | Streamlines data collection through forms and interactive elements. | ✅ | ✅ | ❌ | ✅ | 31 | | Advanced Visualization & Outcome Previewing | Utilizes complex visuals and previews of outcomes for enhanced comprehension and decision-making. | ✅ | ❌ | ❌ | ✅ | 32 | | Just-in-Time User Interfaces | Generates UI components precisely when needed for the task. | ✅ | 🔶
* Forms only | ❌ | ❌ | 33 | 34 | 35 | ### Example Use Cases 36 | 37 | These are the example tasks demonstrated during the hackathon to showcase how Alvea's just-in-time user interfaces can be used to enhance user experiences. 38 | 39 | 40 | 41 | 46 | 51 | 52 | 53 | 59 | 65 | 66 | 67 | 72 | 77 | 82 | 83 |
42 | 43 | Trip Planning 44 | 45 | 47 | 48 | Home Theater Setup 49 | 50 |
54 | Task: 55 |
"I want to plan a trip to Lake Tahoe for 2"
56 |

57 | Alvea streamlines trip planning by combining interactive forms for inputting travel preferences with a visual map component, showcasing destinations and points of interest with markers for an intuitive planning experience. 58 |
60 | Task: 61 |
"I'm trying to plan my new home theater room, 5.1.2 Dolby Atmos. Where should I put my speakers?"
62 |
63 | For home theater enthusiasts, Alvea captures room specifications through a simple form and employs AI code generation to show a 3D visualization of the room, enabling users to virtually place speakers and optimize their audio setup with precision and ease. 64 |
68 | 69 | image 70 | 71 | 73 | 74 | image 75 | 76 | 78 | 79 | image 80 | 81 |
84 | 85 | 86 | ## How to use 87 | 88 | To run the example locally you need to: 89 | 90 | 1. Sign up at [OpenAI's Developer Platform](https://platform.openai.com/signup). 91 | 2. Go to [OpenAI's dashboard](https://platform.openai.com/account/api-keys) and create an API KEY. 92 | 3. Set the required OpenAI environment variable as the token value as shown [the example env file](./.env.local.example) but in a new file called `.env.local` 93 | 4. `pnpm install` to install the required dependencies. 94 | 5. `pnpm dev` to launch the development server. 95 | 96 | ## Built with 97 | 98 | - [Vercel AI SDK](https://sdk.vercel.ai/docs) 99 | - [Next.js](https://nextjs.org/) 100 | - [OpenAI](https://openai.com) 101 | - [React](https://reactjs.org/) 102 | - [TypeScript](https://www.typescriptlang.org/) 103 | - [Tailwind CSS](https://tailwindcss.com/) 104 | - [React JSON Schema Form](https://github.com/rjsf-team/react-jsonschema-form) 105 | - [Leaflet](https://leafletjs.com/) 106 | 107 | ## Learn More 108 | 109 | To learn more about OpenAI, Next.js, and the Vercel AI SDK take a look at the following resources: 110 | 111 | - [Vercel AI SDK docs](https://sdk.vercel.ai/docs) 112 | - [Vercel AI Playground](https://play.vercel.ai) 113 | - [OpenAI Documentation](https://platform.openai.com/docs) - learn about OpenAI features and API. 114 | - [Next.js Documentation](https://nextjs.org/docs) - learn about Next.js features and API. 115 | -------------------------------------------------------------------------------- /app/api/assistant/assistant-setup.md: -------------------------------------------------------------------------------- 1 | # Home Automation Assistant Example 2 | 3 | ## Setup 4 | 5 | ### Create OpenAI Assistant 6 | 7 | [OpenAI Assistant Website](https://platform.openai.com/assistants) 8 | 9 | Create a new assistant. Enable Code interpreter. Add the following functions and instructions to the assistant. 
10 | 11 | Then add the assistant id to the `.env.local` file as `ASSISTANT_ID=your-assistant-id`. 12 | 13 | ### Instructions 14 | 15 | ``` 16 | You are an assistant with access to a home automation system. You can get and set the temperature in the bedroom, home office, living room, kitchen and bathroom. 17 | 18 | The system uses temperature in Celsius. If the user requests Fahrenheit, you should convert the temperature to Fahrenheit. 19 | ``` 20 | 21 | ### getRoomTemperature function 22 | 23 | ```json 24 | { 25 | "name": "getRoomTemperature", 26 | "description": "Get the temperature in a room", 27 | "parameters": { 28 | "type": "object", 29 | "properties": { 30 | "room": { 31 | "type": "string", 32 | "enum": ["bedroom", "home office", "living room", "kitchen", "bathroom"] 33 | } 34 | }, 35 | "required": ["room"] 36 | } 37 | } 38 | ``` 39 | 40 | ### setRoomTemperature function 41 | 42 | ```json 43 | { 44 | "name": "setRoomTemperature", 45 | "description": "Set the temperature in a room", 46 | "parameters": { 47 | "type": "object", 48 | "properties": { 49 | "room": { 50 | "type": "string", 51 | "enum": ["bedroom", "home office", "living room", "kitchen", "bathroom"] 52 | }, 53 | "temperature": { "type": "number" } 54 | }, 55 | "required": ["room", "temperature"] 56 | } 57 | } 58 | ``` 59 | 60 | ## Run 61 | 62 | 1. Run `pnpm run dev` in `examples/next-openai` 63 | 2. Go to http://localhost:3000/assistant 64 | -------------------------------------------------------------------------------- /app/api/assistant/route.ts: -------------------------------------------------------------------------------- 1 | import { experimental_AssistantResponse } from 'ai'; 2 | import OpenAI from 'openai'; 3 | import { MessageContentText } from 'openai/resources/beta/threads/messages/messages'; 4 | 5 | // Create an OpenAI API client (that's edge friendly!) 6 | const openai = new OpenAI({ 7 | apiKey: process.env.OPENAI_API_KEY || '', 8 | }); 9 | 10 | // IMPORTANT! 
Set the runtime to edge 11 | export const runtime = 'edge'; 12 | 13 | const homeTemperatures = { 14 | bedroom: 20, 15 | 'home office': 21, 16 | 'living room': 21, 17 | kitchen: 22, 18 | bathroom: 23, 19 | }; 20 | 21 | export async function POST(req: Request) { 22 | // Parse the request body 23 | const input: { 24 | threadId: string | null; 25 | message: string; 26 | } = await req.json(); 27 | 28 | // Create a thread if needed 29 | const threadId = input.threadId ?? (await openai.beta.threads.create({})).id; 30 | 31 | // Add a message to the thread 32 | const createdMessage = await openai.beta.threads.messages.create(threadId, { 33 | role: 'user', 34 | content: input.message, 35 | }); 36 | 37 | return experimental_AssistantResponse( 38 | { threadId, messageId: createdMessage.id }, 39 | async ({ threadId, sendMessage, sendDataMessage }) => { 40 | // Run the assistant on the thread 41 | const run = await openai.beta.threads.runs.create(threadId, { 42 | assistant_id: 43 | process.env.ASSISTANT_ID ?? 
44 | (() => { 45 | throw new Error('ASSISTANT_ID is not set'); 46 | })(), 47 | }); 48 | 49 | async function waitForRun(run: OpenAI.Beta.Threads.Runs.Run) { 50 | // Poll for status change 51 | while (run.status === 'queued' || run.status === 'in_progress') { 52 | // delay for 500ms: 53 | await new Promise(resolve => setTimeout(resolve, 500)); 54 | 55 | run = await openai.beta.threads.runs.retrieve(threadId!, run.id); 56 | } 57 | 58 | // Check the run status 59 | if ( 60 | run.status === 'cancelled' || 61 | run.status === 'cancelling' || 62 | run.status === 'failed' || 63 | run.status === 'expired' 64 | ) { 65 | throw new Error(run.status); 66 | } 67 | 68 | if (run.status === 'requires_action') { 69 | if (run.required_action?.type === 'submit_tool_outputs') { 70 | const tool_outputs = 71 | run.required_action.submit_tool_outputs.tool_calls.map( 72 | toolCall => { 73 | const parameters = JSON.parse(toolCall.function.arguments); 74 | 75 | switch (toolCall.function.name) { 76 | case 'getRoomTemperature': { 77 | const temperature = 78 | homeTemperatures[ 79 | parameters.room as keyof typeof homeTemperatures 80 | ]; 81 | 82 | return { 83 | tool_call_id: toolCall.id, 84 | output: temperature.toString(), 85 | }; 86 | } 87 | 88 | case 'setRoomTemperature': { 89 | const oldTemperature = 90 | homeTemperatures[ 91 | parameters.room as keyof typeof homeTemperatures 92 | ]; 93 | 94 | homeTemperatures[ 95 | parameters.room as keyof typeof homeTemperatures 96 | ] = parameters.temperature; 97 | 98 | sendDataMessage({ 99 | role: 'data', 100 | data: { 101 | oldTemperature, 102 | newTemperature: parameters.temperature, 103 | description: `Temperature in ${parameters.room} changed from ${oldTemperature} to ${parameters.temperature}`, 104 | }, 105 | }); 106 | 107 | return { 108 | tool_call_id: toolCall.id, 109 | output: `temperature set successfully`, 110 | }; 111 | } 112 | 113 | default: 114 | throw new Error( 115 | `Unknown tool call function: ${toolCall.function.name}`, 116 | ); 117 | 
} 118 | }, 119 | ); 120 | 121 | run = await openai.beta.threads.runs.submitToolOutputs( 122 | threadId!, 123 | run.id, 124 | { tool_outputs }, 125 | ); 126 | 127 | await waitForRun(run); 128 | } 129 | } 130 | } 131 | 132 | await waitForRun(run); 133 | 134 | // Get new thread messages (after our message) 135 | const responseMessages = ( 136 | await openai.beta.threads.messages.list(threadId, { 137 | after: createdMessage.id, 138 | order: 'asc', 139 | }) 140 | ).data; 141 | 142 | // Send the messages 143 | for (const message of responseMessages) { 144 | sendMessage({ 145 | id: message.id, 146 | role: 'assistant', 147 | content: message.content.filter( 148 | content => content.type === 'text', 149 | ) as Array, 150 | }); 151 | } 152 | }, 153 | ); 154 | } 155 | -------------------------------------------------------------------------------- /app/api/chat-with-functions-2/route.ts: -------------------------------------------------------------------------------- 1 | import { 2 | OpenAIStream, 3 | StreamingTextResponse, 4 | // experimental_StreamData, 5 | } from 'ai'; 6 | import OpenAI from 'openai'; 7 | import type { ChatCompletionCreateParams } from 'openai/resources/chat'; 8 | 9 | // IMPORTANT! Set the runtime to edge 10 | export const runtime = 'edge'; 11 | 12 | const functions: ChatCompletionCreateParams.Function[] = [ 13 | /* 14 | { 15 | name: 'get_current_weather', 16 | description: 'Get the current weather.', 17 | parameters: { 18 | type: 'object', 19 | properties: { 20 | format: { 21 | type: 'string', 22 | enum: ['celsius', 'fahrenheit'], 23 | description: 'The temperature unit to use.', 24 | }, 25 | }, 26 | required: ['format'], 27 | }, 28 | }, 29 | { 30 | name: 'eval_code_in_browser', 31 | description: 'Execute javascript code in the browser with eval().', 32 | parameters: { 33 | type: 'object', 34 | properties: { 35 | code: { 36 | type: 'string', 37 | description: `Javascript code that will be directly executed via eval(). 
Do not use backticks in your response. 38 | DO NOT include any newlines in your response, and be sure to provide only valid JSON when providing the arguments object. 39 | The output of the eval() will be returned directly by the function.`, 40 | }, 41 | }, 42 | required: ['code'], 43 | }, 44 | }, 45 | */ 46 | /* 47 | { 48 | name: 'upsert_text_document', 49 | description: 'Write a long rich text document in Markdown', 50 | parameters: { 51 | type: 'object', 52 | properties: { 53 | title: { 54 | type: 'string', 55 | // description: ``, 56 | }, 57 | content: { 58 | type: 'string', 59 | description: ``, 60 | }, 61 | }, 62 | required: ['content'], 63 | }, 64 | }, 65 | { 66 | name: 'edit_text', 67 | description: 'Write a long rich text document in Markdown', 68 | parameters: { 69 | type: 'object', 70 | properties: { 71 | title: { 72 | type: 'string', 73 | // description: ``, 74 | }, 75 | contents: { 76 | type: 'string', 77 | // description: ``, 78 | }, 79 | }, 80 | required: ['code'], 81 | }, 82 | }, 83 | */ 84 | 85 | // Form v3 86 | { 87 | name: 'create_simple_form', 88 | description: 'Use this function to convert user-provided information into a structured form. It dynamically generates a form based on the provided JSON schema, tailored to capture specific details as requested by the user. The function ensures that the form is interactive and user-friendly, making it ideal for collecting and organizing user inputs efficiently.', 89 | parameters: { 90 | type: 'object', 91 | properties: { 92 | id: { 93 | type: 'string', 94 | description: 'Unique identifier for the form. Use a new ID for a new form or an existing ID to edit an existing form.' 95 | }, 96 | jsonSchema: { 97 | // type: 'string', 98 | // description: 'Stringified object of JSON schema defining the structure of the form. It should include field types, titles, and descriptions. Define the data types, required fields, and overall structure of your form here. 
The schema dictates how user inputs are structured and validated. Do not use array types.' 99 | type: 'object', 100 | description: `Object of JSON schema defining the structure of the form. It should include field types, titles, and descriptions. Define the data types, required fields, and overall structure of your form here. The schema dictates how user inputs are structured and validated. 101 | - Must always include clear & concise 'title' property for each field in JSON Schema. 102 | - Must always include informative & detailed 'description' property for each field in JSON Schema. 103 | - Use UI Schema 'ui:placeholder' property to provide examples. 104 | - Valid types: string, number, integer, object. Avoid: Do not use array and boolean types. 105 | - Valid formats (optional): date, date-time. 106 | - Must always use the most appropriate and specific type and format available. 107 | - Range inputs must be split into multiple fields (e.g. start-stop, min-max, etc are 2 fields/questions). 108 | - Can include any additional JSON Schema properties for each field to customize the form's presentation. 109 | - To aid in fast user input when there are finite choices use the enum property to provide a list of options for each field, or if the answer can be parsed as a number then use number type. 110 | For example, instead of room size being one string input, it can be split into three number inputs: length and width and height.`, 111 | // Prefer to ask structured questions with multiple choice answers rather than open-ended questions unless necessary. This will enable using the selected values or numbers as inputs for programs which cannot interpret text.`, 112 | properties: { 113 | type: { 114 | type: 'string', 115 | description: 'Value must be "object"' 116 | }, 117 | } 118 | }, 119 | uiSchema: { 120 | // type: 'string', 121 | // description: 'Stringified object of UI schema for customizing the form\'s presentation. 
Customize the layout and presentation of your form fields here, including widget types and help texts. This schema controls the visual aspects of the form, enhancing user interaction and experience.' 122 | type: 'object', 123 | description: `Object of UI schema for customizing the form\'s presentation. Customize the layout and presentation of your form fields here, including widget types and help texts. This schema controls the visual aspects of the form, enhancing user interaction and experience. 124 | Must include thoughtful and helpful and nonredundant 'ui:placeholder' and 'ui:help' for each field. 125 | Include any additional properties for each field to customize the form's presentation.`, 126 | properties: {} 127 | } 128 | }, 129 | required: ['id', 'jsonSchema', 'uiSchema'] 130 | } 131 | }, 132 | 133 | // Map v2 134 | // Enhanced Map Component 135 | { 136 | name: 'create_dynamic_map', 137 | description: 'This function dynamically generates an interactive map based on user inputs. It is designed to visually represent geographic data or locations as specified by the user. The map can be customized with various markers, zoom levels, and center points, making it ideal for applications in travel planning, event location scouting, or geographical data visualization.', 138 | parameters: { 139 | type: 'object', 140 | properties: { 141 | id: { 142 | type: 'string', 143 | description: 'Unique identifier for the map. Use a new ID for a new map or an existing ID to edit an existing map. This ensures each map instance is uniquely referenced and managed.' 144 | }, 145 | center: { 146 | type: 'object', 147 | properties: { 148 | area: { 149 | type: 'string', 150 | description: 'A short textual description for central focus, such as a place name or description.' 151 | }, 152 | lon: { 153 | type: 'number', 154 | description: 'Longitude of the map’s center point. Determines the horizontal focal point of the map.' 
155 | }, 156 | lat: { 157 | type: 'number', 158 | description: 'Latitude of the map’s center point. Determines the vertical focal point of the map.' 159 | } 160 | }, 161 | required: ['lon', 'lat'], 162 | description: 'Coordinates for the central focus of the map. This setting controls which geographical area the map initially displays.' 163 | }, 164 | zoomLevel: { 165 | type: 'number', 166 | description: 'Defines the initial zoom level of the map. A higher value indicates a closer view, and a lower value provides a broader view. Adjust this to control how much of the area around the center point is visible upon loading.' 167 | }, 168 | markers: { 169 | type: 'array', 170 | description: 'A collection of markers to be placed on the map. Each marker represents a specific location or point of interest.', 171 | items: { 172 | type: 'object', 173 | properties: { 174 | label: { 175 | type: 'string', 176 | description: 'A textual label for the marker, such as a place name or description.' 177 | }, 178 | lon: { 179 | type: 'number', 180 | description: 'Longitude of the marker position.' 181 | }, 182 | lat: { 183 | type: 'number', 184 | description: 'Latitude of the marker position.' 185 | }, 186 | color: { 187 | type: 'string', 188 | description: 'Color of the marker. This can be used to categorize or differentiate markers.' 189 | } 190 | }, 191 | required: ['label', 'lon', 'lat'] 192 | } 193 | } 194 | }, 195 | required: ['id', 'center', 'zoomLevel', 'markers'], 196 | /* 197 | additionalProperties: { 198 | interactiveFeatures: { 199 | type: 'object', 200 | properties: { 201 | draggableMarkers: { 202 | type: 'boolean', 203 | description: 'Allow markers to be draggable for user interaction. Useful for applications requiring location adjustments.' 204 | }, 205 | routePlanning: { 206 | type: 'boolean', 207 | description: 'Enable route planning features between markers. Ideal for travel or logistics planning.' 
208 | }, 209 | areaHighlighting: { 210 | type: 'boolean', 211 | description: 'Allow users to highlight specific areas on the map, useful for emphasizing regions or territories.' 212 | } 213 | }, 214 | description: 'Optional interactive features that enhance user engagement with the map. These can be enabled or disabled based on application requirements.' 215 | } 216 | } 217 | */ 218 | } 219 | }, 220 | 221 | 222 | // 3D generation 223 | /* 224 | { 225 | name: 'upsert_3d_scene', 226 | description: 'Generate 3D scene to visually represent the scene described in the form', 227 | parameters: { 228 | type: 'object', 229 | properties: { 230 | id: { 231 | type: 'string', 232 | description: `Form identifier. To add a new form create a new unique auto-incrementing ID. To edit an existing form use an existing ID here.`, 233 | }, 234 | descriptionOfScene: { 235 | type: 'string', 236 | description: 'Exhaustive detailed description of the scene which will be given to an expert 3D software developer' 237 | }, 238 | } 239 | }, 240 | }, 241 | */ 242 | // Checklist 243 | /* 244 | { 245 | name: 'create_interactive_checklist', 246 | description: 'This function dynamically generates an interactive checklist based on user inputs. Designed to enable users to efficiently manage tasks, goals, or items, this checklist can be customized with various options and states. It is ideal for applications in task management, event planning, or any scenario where a list of items needs to be tracked and updated.', 247 | parameters: { 248 | type: 'object', 249 | properties: { 250 | id: { 251 | type: 'string', 252 | description: `Form identifier. To add a new form create a new unique auto-incrementing ID. To edit an existing form use an existing ID here.`, 253 | }, 254 | items: { 255 | type: 'array', 256 | items: { 257 | type: 'object', 258 | properties: { 259 | id: { 260 | type: 'string', 261 | description: 'Unique identifier for the item. 
This helps in tracking and updating individual checklist items.' 262 | }, 263 | label: { 264 | type: 'string', 265 | description: 'Text label for the checklist item. This should clearly describe the task or action to be taken.' 266 | }, 267 | checked: { 268 | type: 'boolean', 269 | description: 'Indicates whether the checklist item is initially marked as completed (true) or pending (false).' 270 | }, 271 | }, 272 | required: ['id', 'label', 'checked'] 273 | } 274 | } 275 | }, 276 | required: ['id', 'items'], 277 | }, 278 | } 279 | */ 280 | ]; 281 | 282 | export async function POST(req: Request) { 283 | try { 284 | const body = await req.json(); 285 | const { messages, apiKey } = body; 286 | 287 | // Create an OpenAI API client (that's edge friendly!) 288 | const openai = new OpenAI({ 289 | apiKey: apiKey || process.env.OPENAI_API_KEY || '', 290 | }); 291 | 292 | const response = await openai.chat.completions.create({ 293 | // model: 'gpt-3.5-turbo-0613', 294 | // model: 'gpt-4-1106-preview', 295 | model: 'gpt-4-0125-preview', 296 | stream: true, 297 | messages: [ 298 | { 299 | // id: nanoid(), 300 | role: 'system', 301 | content: ` 302 | You are an intelligent assistant specializing in understanding user needs and intentions for the purpose of dynamically constructing a context-dependent UI using available components. 303 | 304 | When you receive a user's input, your first task is to decipher the user's intention. Consider the context, the specifics of the request, and any underlying needs or goals. If the request is ambiguous or lacks detail, ask targeted follow-up questions to gather the necessary information. Your aim is to develop a clear and comprehensive understanding of what the user wants to achieve, such that you can invoke the following tools to display to the user: 305 | 306 | Available tools: 307 | - Interactive Map: Essential for travel planning, event locations, and potentially home automation control. 
308 | - 3D Rendering Engine: For interior design, home automation visualization, and potentially for event space planning. 309 | - Customizable Forms/Input Components: To present to a user to ask them follow up questions that clarify their intent. 310 | 311 | Instructions: 312 | - If you need further context from the user to understand their intention sufficient enough to generate a good UI, respond with 3-5 follow-up questions or statements to clarify the user's intention. Focus on understanding the specific requirements, preferences, or constraints related to their request. 313 | - If you have only 1 quick follow-up question then use chat, otherwise must always use the 'create_simple_form' function. 314 | ` 315 | // content: ` 316 | // Now you are an advanced interface designer, capable of creating structured UI schemas based on the available user requirements. 317 | 318 | // Now that you have analyzed the user's intentions, your next step is to design an interactive, user-friendly form that captures all necessary follow up information to address their request. Use the insights gathered from these follow-up questions to construct a YAML schema and corresponding UI schema that will guide the user through providing detailed and specific information. 319 | 320 | // Instructions: 321 | // - Only return correctly formatted JSON output which satisfies the AskUserQuestions type and no comments. Then, create a UI schema focusing on user-friendly interaction methods 322 | // - Communicate using only the TypeScript types RJSFSchema, UiSchema 323 | // - Must always use block scalar indicator style in YAML 324 | // - Make sure you always add help text to input fields 325 | // - For each form field, start with a sensible default 326 | // Bonus: 327 | // - After gathering all the user input, summarize the user's intent in a concise statement, which will inform the choice and configuration of the UI tools that will be invoked using the JSON output from this step. 
328 | // ` 329 | }, 330 | // { 331 | // id: nanoid(), 332 | // role: 'assistant', 333 | // function_call: `{"function_call": {"name": "create_simple_form", "arguments": "{\n \"id\": \"trip_planning_form\",\n \"jsonSchema\": \"{\\\"title\\\":\\\"Lake Tahoe Trip Planning\\\",\\\"type\\\":\\\"object\\\",\\\"properties\\\":{\\\"dates\\\":{\\\"type\\\":\\\"string\\\",\\\"title\\\":\\\"What are the intended dates for your trip?\\\",\\\"format\\\":\\\"date\\\"},\\\"transportation\\\":{\\\"type\\\":\\\"string\\\",\\\"title\\\":\\\"How do you plan to get to Lake Tahoe?\\\",\\\"enum\\\":[\\\"Car\\\",\\\"Bus\\\",\\\"Train\\\",\\\"Plane\\\",\\\"Other\\\"]},\\\"accommodation\\\":{\\\"type\\\":\\\"string\\\",\\\"title\\\":\\\"What type of accommodation are you looking for?\\\",\\\"enum\\\":[\\\"Hotel\\\",\\\"Motel\\\",\\\"Cabin\\\",\\\"Resort\\\",\\\"Airbnb\\\"]},\\\"activities\\\":{\\\"type\\\":\\\"string\\\",\\\"title\\\":\\\"What activities are you interested in at Lake Tahoe?\\\",\\\"description\\\":\\\"e.g., skiing, hiking, boating\\\"},\\\"budget\\\":{\\\"type\\\":\\\"string\\\",\\\"title\\\":\\\"What is your budget for the trip per person?\\\"},\\\"preferences\\\":{\\\"type\\\":\\\"string\\\",\\\"title\\\":\\\"Do you have any specific preferences or needs for this trip`, 334 | // } 335 | ...messages, 336 | ], 337 | functions, 338 | }); 339 | 340 | // const data = new experimental_StreamData(); 341 | const stream = OpenAIStream(response, { 342 | // experimental_onFunctionCall: async ( 343 | // { name, arguments: args }, 344 | // createFunctionCallMessages, 345 | // ) => { 346 | // return; 347 | // if (name === 'get_current_weather') { 348 | // // Call a weather API here 349 | // const weatherData = { 350 | // temperature: 20, 351 | // unit: args.format === 'celsius' ? 
'C' : 'F', 352 | // }; 353 | 354 | // data.append({ 355 | // text: 'Some custom data', 356 | // }); 357 | 358 | // const newMessages = createFunctionCallMessages(weatherData); 359 | // return openai.chat.completions.create({ 360 | // messages: [...messages, ...newMessages], 361 | // stream: true, 362 | // model: 'gpt-3.5-turbo-0613', 363 | // }); 364 | // } 365 | // }, 366 | onCompletion(completion) { 367 | console.log('completion', completion); 368 | }, 369 | // onFinal(completion) { 370 | // // data.close(); 371 | // }, 372 | // experimental_streamData: true, 373 | }); 374 | 375 | // data.append({ 376 | // text: 'Hello, how are you?', 377 | // }); 378 | 379 | // return new StreamingTextResponse(stream, {}, data); 380 | return new StreamingTextResponse(stream); 381 | } catch (error: any) { 382 | console.error(error); 383 | // return new Response('Internal server error', { status: 500 }); 384 | return new Response(error.message, { status: 500 }); 385 | } 386 | } 387 | -------------------------------------------------------------------------------- /app/api/chat-with-functions/route.ts: -------------------------------------------------------------------------------- 1 | import { 2 | OpenAIStream, 3 | StreamingTextResponse, 4 | experimental_StreamData, 5 | } from 'ai'; 6 | import OpenAI from 'openai'; 7 | import type { ChatCompletionCreateParams } from 'openai/resources/chat'; 8 | 9 | // Create an OpenAI API client (that's edge friendly!) 10 | const openai = new OpenAI({ 11 | apiKey: process.env.OPENAI_API_KEY || '', 12 | }); 13 | 14 | // IMPORTANT! 
Set the runtime to edge 15 | export const runtime = 'edge'; 16 | 17 | const functions: ChatCompletionCreateParams.Function[] = [ 18 | { 19 | name: 'get_current_weather', 20 | description: 'Get the current weather.', 21 | parameters: { 22 | type: 'object', 23 | properties: { 24 | format: { 25 | type: 'string', 26 | enum: ['celsius', 'fahrenheit'], 27 | description: 'The temperature unit to use.', 28 | }, 29 | }, 30 | required: ['format'], 31 | }, 32 | }, 33 | { 34 | name: 'eval_code_in_browser', 35 | description: 'Execute javascript code in the browser with eval().', 36 | parameters: { 37 | type: 'object', 38 | properties: { 39 | code: { 40 | type: 'string', 41 | description: `Javascript code that will be directly executed via eval(). Do not use backticks in your response. 42 | DO NOT include any newlines in your response, and be sure to provide only valid JSON when providing the arguments object. 43 | The output of the eval() will be returned directly by the function.`, 44 | }, 45 | }, 46 | required: ['code'], 47 | }, 48 | }, 49 | ]; 50 | 51 | export async function POST(req: Request) { 52 | const { messages } = await req.json(); 53 | 54 | const response = await openai.chat.completions.create({ 55 | model: 'gpt-3.5-turbo-0613', 56 | stream: true, 57 | messages, 58 | functions, 59 | }); 60 | 61 | const data = new experimental_StreamData(); 62 | const stream = OpenAIStream(response, { 63 | experimental_onFunctionCall: async ( 64 | { name, arguments: args }, 65 | createFunctionCallMessages, 66 | ) => { 67 | if (name === 'get_current_weather') { 68 | // Call a weather API here 69 | const weatherData = { 70 | temperature: 20, 71 | unit: args.format === 'celsius' ? 
'C' : 'F', 72 | }; 73 | 74 | data.append({ 75 | text: 'Some custom data', 76 | }); 77 | 78 | const newMessages = createFunctionCallMessages(weatherData); 79 | return openai.chat.completions.create({ 80 | messages: [...messages, ...newMessages], 81 | stream: true, 82 | model: 'gpt-3.5-turbo-0613', 83 | }); 84 | } 85 | }, 86 | onCompletion(completion) { 87 | console.log('completion', completion); 88 | }, 89 | onFinal(completion) { 90 | data.close(); 91 | }, 92 | experimental_streamData: true, 93 | }); 94 | 95 | data.append({ 96 | text: 'Hello, how are you?', 97 | }); 98 | 99 | return new StreamingTextResponse(stream, {}, data); 100 | } 101 | -------------------------------------------------------------------------------- /app/api/chat-with-tools/route.ts: -------------------------------------------------------------------------------- 1 | import { 2 | OpenAIStream, 3 | StreamingTextResponse, 4 | Tool, 5 | ToolCallPayload, 6 | experimental_StreamData, 7 | } from 'ai'; 8 | import OpenAI from 'openai'; 9 | 10 | // Create an OpenAI API client (that's edge friendly!) 11 | const openai = new OpenAI({ 12 | apiKey: process.env.OPENAI_API_KEY || '', 13 | }); 14 | 15 | // IMPORTANT! Set the runtime to edge 16 | export const runtime = 'edge'; 17 | 18 | const tools: Tool[] = [ 19 | { 20 | type: 'function', 21 | function: { 22 | name: 'get_current_weather', 23 | description: 'Get the current weather', 24 | parameters: { 25 | type: 'object', 26 | properties: { 27 | location: { 28 | type: 'string', 29 | description: 'The city and state, e.g. San Francisco, CA', 30 | }, 31 | format: { 32 | type: 'string', 33 | enum: ['celsius', 'fahrenheit'], 34 | description: 35 | 'The temperature unit to use. 
Infer this from the users location.', 36 | }, 37 | }, 38 | required: ['location', 'format'], 39 | }, 40 | }, 41 | }, 42 | { 43 | type: 'function', 44 | function: { 45 | name: 'eval_code_in_browser', 46 | description: 'Execute javascript code in the browser with eval().', 47 | parameters: { 48 | type: 'object', 49 | properties: { 50 | code: { 51 | type: 'string', 52 | description: `Javascript code that will be directly executed via eval(). Do not use backticks in your response. 53 | DO NOT include any newlines in your response, and be sure to provide only valid JSON when providing the arguments object. 54 | The output of the eval() will be returned directly by the function.`, 55 | }, 56 | }, 57 | required: ['code'], 58 | }, 59 | }, 60 | }, 61 | ]; 62 | 63 | export async function POST(req: Request) { 64 | const { messages } = await req.json(); 65 | 66 | const model = 'gpt-3.5-turbo-0613'; 67 | 68 | const response = await openai.chat.completions.create({ 69 | model, 70 | stream: true, 71 | messages, 72 | tools, 73 | tool_choice: 'auto', 74 | }); 75 | 76 | const data = new experimental_StreamData(); 77 | const stream = OpenAIStream(response, { 78 | experimental_onToolCall: async ( 79 | call: ToolCallPayload, 80 | appendToolCallMessage, 81 | ) => { 82 | for (const toolCall of call.tools) { 83 | // Note: this is a very simple example of a tool call handler 84 | // that only supports a single tool call function. 85 | if (toolCall.func.name === 'get_current_weather') { 86 | // Call a weather API here 87 | const weatherData = { 88 | temperature: 20, 89 | unit: toolCall.func.arguments.format === 'celsius' ? 
'C' : 'F', 90 | }; 91 | 92 | const newMessages = appendToolCallMessage({ 93 | tool_call_id: toolCall.id, 94 | function_name: 'get_current_weather', 95 | tool_call_result: weatherData, 96 | }); 97 | 98 | return openai.chat.completions.create({ 99 | messages: [...messages, ...newMessages], 100 | model, 101 | stream: true, 102 | tools, 103 | tool_choice: 'auto', 104 | }); 105 | } 106 | } 107 | }, 108 | onCompletion(completion) { 109 | console.log('completion', completion); 110 | }, 111 | onFinal(completion) { 112 | data.close(); 113 | }, 114 | experimental_streamData: true, 115 | }); 116 | 117 | data.append({ 118 | text: 'Hello, how are you?', 119 | }); 120 | 121 | return new StreamingTextResponse(stream, {}, data); 122 | } 123 | -------------------------------------------------------------------------------- /app/api/chat-with-vision/route.ts: -------------------------------------------------------------------------------- 1 | import OpenAI from 'openai'; 2 | import { OpenAIStream, StreamingTextResponse } from 'ai'; 3 | 4 | // Create an OpenAI API client (that's edge friendly!) 5 | const openai = new OpenAI({ 6 | apiKey: process.env.OPENAI_API_KEY || '', 7 | }); 8 | 9 | // IMPORTANT! 
Set the runtime to edge 10 | export const runtime = 'edge'; 11 | 12 | export async function POST(req: Request) { 13 | // 'data' contains the additional data that you have sent: 14 | const { messages, data } = await req.json(); 15 | 16 | const initialMessages = messages.slice(0, -1); 17 | const currentMessage = messages[messages.length - 1]; 18 | 19 | // Ask OpenAI for a streaming chat completion given the prompt 20 | const response = await openai.chat.completions.create({ 21 | model: 'gpt-4-vision-preview', 22 | stream: true, 23 | max_tokens: 150, 24 | messages: [ 25 | ...initialMessages, 26 | { 27 | ...currentMessage, 28 | content: [ 29 | { type: 'text', text: currentMessage.content }, 30 | 31 | // forward the image information to OpenAI: 32 | { 33 | type: 'image_url', 34 | image_url: data.imageUrl, 35 | }, 36 | ], 37 | }, 38 | ], 39 | }); 40 | 41 | // Convert the response into a friendly text-stream 42 | const stream = OpenAIStream(response); 43 | // Respond with the stream 44 | return new StreamingTextResponse(stream); 45 | } 46 | -------------------------------------------------------------------------------- /app/api/chat/route.ts: -------------------------------------------------------------------------------- 1 | import OpenAI from 'openai'; 2 | import { OpenAIStream, StreamingTextResponse } from 'ai'; 3 | 4 | // Create an OpenAI API client (that's edge friendly!) 5 | const openai = new OpenAI({ 6 | apiKey: process.env.OPENAI_API_KEY || '', 7 | }); 8 | 9 | // IMPORTANT! 
Set the runtime to edge 10 | export const runtime = 'edge'; 11 | 12 | export async function POST(req: Request) { 13 | const { messages } = await req.json(); 14 | 15 | // Ask OpenAI for a streaming chat completion given the prompt 16 | const response = await openai.chat.completions.create({ 17 | model: 'gpt-3.5-turbo', 18 | stream: true, 19 | messages, 20 | }); 21 | 22 | // Convert the response into a friendly text-stream 23 | const stream = OpenAIStream(response); 24 | // Respond with the stream 25 | return new StreamingTextResponse(stream); 26 | } 27 | -------------------------------------------------------------------------------- /app/api/completion/route.ts: -------------------------------------------------------------------------------- 1 | import OpenAI from 'openai'; 2 | import { 3 | OpenAIStream, 4 | StreamingTextResponse, 5 | experimental_StreamData, 6 | } from 'ai'; 7 | 8 | // Create an OpenAI API client (that's edge friendly!) 9 | const openai = new OpenAI({ 10 | apiKey: process.env.OPENAI_API_KEY || '', 11 | }); 12 | 13 | // IMPORTANT! 
Set the runtime to edge 14 | export const runtime = 'edge'; 15 | 16 | export async function POST(req: Request) { 17 | // Extract the `prompt` from the body of the request 18 | const { prompt } = await req.json(); 19 | 20 | // Ask OpenAI for a streaming completion given the prompt 21 | const response = await openai.completions.create({ 22 | model: 'gpt-3.5-turbo-instruct', 23 | max_tokens: 2000, 24 | stream: true, 25 | prompt, 26 | }); 27 | 28 | // optional: use stream data 29 | const data = new experimental_StreamData(); 30 | 31 | data.append({ test: 'value' }); 32 | 33 | // Convert the response into a friendly text-stream 34 | const stream = OpenAIStream(response, { 35 | onFinal(completion) { 36 | data.close(); 37 | }, 38 | experimental_streamData: true, 39 | }); 40 | 41 | // Respond with the stream 42 | return new StreamingTextResponse(stream, {}, data); 43 | } 44 | -------------------------------------------------------------------------------- /app/api/spell-check/route.ts: -------------------------------------------------------------------------------- 1 | import OpenAI from 'openai'; 2 | import { OpenAIStream, StreamingTextResponse } from 'ai'; 3 | 4 | export const runtime = 'edge'; 5 | 6 | const openai = new OpenAI({ 7 | apiKey: process.env.OPENAI_API_KEY ?? '', 8 | }); 9 | 10 | export async function POST(req: Request) { 11 | // Extract the `prompt` from the body of the request 12 | const { prompt } = await req.json(); 13 | 14 | // Request the OpenAI API for the response based on the prompt 15 | const response = await openai.chat.completions.create({ 16 | model: 'gpt-3.5-turbo', 17 | stream: true, 18 | // a precise prompt is important for the AI to reply with the correct tokens 19 | messages: [ 20 | { 21 | role: 'user', 22 | content: `Given the following post content, detect if it has typo or not. 23 | Respond with a JSON array of typos ["typo1", "typo2", ...] or an empty [] if there's none. Only respond with an array. 
'use client';

import { Message, experimental_useAssistant as useAssistant } from 'ai/react';
import { useEffect, useRef } from 'react';

// Text color per message role, used to visually distinguish speakers.
// NOTE(review): the generic parameters were lost in extraction — presumably
// Record<Message['role'], string>; confirm against the original file.
const roleToColorMap: Record<Message['role'], string> = {
  system: 'red',
  user: 'black',
  function: 'blue',
  tool: 'purple',
  assistant: 'green',
  data: 'orange',
};

/**
 * Assistant chat page backed by the experimental useAssistant hook.
 * Renders the transcript (including structured `data` messages), shows a
 * pulsing placeholder while a run is in progress, and re-focuses the input
 * whenever the assistant is ready for the next message.
 */
export default function Chat() {
  const { status, messages, input, submitMessage, handleInputChange, error } =
    useAssistant({
      api: '/api/assistant',
    });

  // When status changes to accepting messages, focus the input:
  const inputRef = useRef<HTMLInputElement>(null);
  useEffect(() => {
    if (status === 'awaiting_message') {
      inputRef.current?.focus();
    }
  }, [status]);

  return (
    <div className="flex flex-col w-full max-w-md py-24 mx-auto stretch">
      {error != null && (
        <div className="relative bg-red-500 text-white px-6 py-4 rounded-md">
          <span className="block sm:inline">
            Error: {(error as any).toString()}
          </span>
        </div>
      )}

      {messages.map((m: Message) => (
        <div
          key={m.id}
          className="whitespace-pre-wrap"
          style={{ color: roleToColorMap[m.role] }}
        >
          <strong>{`${m.role}: `}</strong>
          {m.role !== 'data' && m.content}
          {m.role === 'data' && (
            <>
              {/* Structured data messages carry a description plus a payload. */}
              {(m.data as any).description}
              <br />
              <pre className="bg-gray-200">
                {JSON.stringify(m.data, null, 2)}
              </pre>
            </>
          )}
          <br />
          <br />
        </div>
      ))}

      {status === 'in_progress' && (
        // Pulsing skeleton shown while the assistant run executes.
        <div className="h-8 w-full max-w-md p-2 mb-8 bg-gray-300 dark:bg-gray-600 rounded-lg animate-pulse" />
      )}

      <form onSubmit={submitMessage}>
        <input
          ref={inputRef}
          disabled={status !== 'awaiting_message'}
          className="fixed bottom-0 w-full max-w-md p-2 mb-8 border border-gray-300 rounded shadow-xl"
          value={input}
          placeholder="What is the temperature in the living room?"
          onChange={handleInputChange}
        />
      </form>
    </div>
  );
}
/**
 * Minimal chat page: renders the transcript and a fixed input bound to the
 * default useChat endpoint (/api/chat).
 * NOTE(review): JSX markup was stripped during extraction and has been
 * reconstructed — verify classNames against the original file.
 */
export default function Chat() {
  const { messages, input, handleInputChange, handleSubmit } = useChat();

  return (
    <div className="flex flex-col w-full max-w-md py-24 mx-auto stretch">
      {messages.map(m => (
        <div key={m.id} className="whitespace-pre-wrap">
          {m.role === 'user' ? 'User: ' : 'AI: '}
          {m.content}
        </div>
      ))}

      <form onSubmit={handleSubmit}>
        <input
          className="fixed bottom-0 w-full max-w-md p-2 mb-8 border border-gray-300 rounded shadow-xl"
          value={input}
          placeholder="Say something..."
          onChange={handleInputChange}
        />
      </form>
    </div>
  );
}
/**
 * useCompletion example page: shows any stream-data payload, surfaces errors,
 * renders the streaming completion text, and submits prompts via a fixed input.
 * NOTE(review): JSX markup was stripped during extraction and has been
 * reconstructed — verify classNames against the original file.
 */
export default function Chat() {
  const { completion, input, handleInputChange, handleSubmit, error, data } =
    useCompletion();

  return (
    <div className="flex flex-col w-full max-w-md py-24 mx-auto stretch">
      <h4 className="pb-4 text-xl font-bold text-gray-900 md:text-xl">
        useCompletion Example
      </h4>
      {data && (
        // Raw stream-data payload, pretty-printed for inspection.
        <pre className="p-4 text-sm bg-gray-100">
          {JSON.stringify(data, null, 2)}
        </pre>
      )}
      {error && (
        <div className="fixed top-0 left-0 w-full p-4 text-center text-white bg-red-500">
          {error.message}
        </div>
      )}
      {completion}
      <form onSubmit={handleSubmit}>
        <input
          className="fixed bottom-0 w-full max-w-md p-2 mb-8 border border-gray-300 rounded shadow-xl"
          value={input}
          placeholder="Say something..."
          onChange={handleInputChange}
        />
      </form>
    </div>
  );
}
'use client';

import { FunctionCallHandler, nanoid } from 'ai';
import { Message, useChat } from 'ai/react';

/**
 * Function-calling chat page. Services `eval_code_in_browser` calls requested
 * by the model: the arguments are parsed, the code is eval'd client-side, and
 * the executed code is appended back to the conversation as a `function`
 * message.
 */
export default function Chat() {
  const functionCallHandler: FunctionCallHandler = async (
    chatMessages,
    functionCall,
  ) => {
    if (functionCall.name !== 'eval_code_in_browser') return;
    if (!functionCall.arguments) return;

    // Parsing here does not always work since it seems that some characters in
    // generated code aren't escaped properly — guard against malformed JSON
    // (and eval failures) instead of letting the whole chat UI crash.
    try {
      const parsedFunctionCallArguments: { code: string } = JSON.parse(
        functionCall.arguments,
      );

      // WARNING: Do NOT do this in real-world applications!
      eval(parsedFunctionCallArguments.code);

      return {
        messages: [
          ...chatMessages,
          {
            id: nanoid(),
            name: 'eval_code_in_browser',
            role: 'function' as const,
            content: parsedFunctionCallArguments.code,
          },
        ],
      };
    } catch (error) {
      console.error('Failed to handle eval_code_in_browser call', error);
      return;
    }
  };

  const { messages, input, handleInputChange, handleSubmit } = useChat({
    api: '/api/chat-with-functions',
    experimental_onFunctionCall: functionCallHandler,
  });

  // Generate a map of message role to text color
  const roleToColorMap: Record<Message['role'], string> = {
    system: 'red',
    user: 'black',
    function: 'blue',
    tool: 'purple',
    assistant: 'green',
    data: 'orange',
  };

  return (
    <div className="flex flex-col w-full max-w-md py-24 mx-auto stretch">
      {messages.length > 0
        ? messages.map((m: Message) => (
            <div
              key={m.id}
              className="whitespace-pre-wrap"
              style={{ color: roleToColorMap[m.role] }}
            >
              <strong>{`${m.role}: `}</strong>
              {m.content || JSON.stringify(m.function_call)}
              <br />
              <br />
            </div>
          ))
        : null}
      <form onSubmit={handleSubmit}>
        <input
          className="fixed bottom-0 w-full max-w-md p-2 mb-8 border border-gray-300 rounded shadow-xl"
          value={input}
          placeholder="Say something..."
          onChange={handleInputChange}
        />
      </form>
    </div>
  );
}
12px !important; 81 | } 82 | 83 | .schema-form .chakra-form-control>div { 84 | grid-gap: var(--chakra-space-6) !important; 85 | } 86 | 87 | .schema-form input { 88 | padding: 8px !important; 89 | height: 32px !important; 90 | border-radius: 0px !important; 91 | background-color: var(--chakra-colors-white) !important; 92 | } 93 | 94 | .schema-form textarea { 95 | background-color: white !important; 96 | } 97 | 98 | .schema-form .chakra-button { 99 | background-color: var(--chakra-colors-teal-500) !important; 100 | color: white !important; 101 | font-size: 14px !important; 102 | border-radius: 4px !important; 103 | height: 32px !important; 104 | } 105 | 106 | /* Aka sections */ 107 | .schema-form fieldset>legend { 108 | /* Display block doesn't work for some reason. Does not fill parent width which is still a div, not sure why */ 109 | @apply mb-3 pb-1 w-full border-b pt-4 text-xl font-semibold; 110 | } 111 | 112 | .schema-form fieldset>div { 113 | /* Offset to indicate hierarchy */ 114 | @apply ml-3; 115 | } 116 | 117 | /* Label + component = group */ 118 | .schema-form .form-group { 119 | @apply mb-2; 120 | } 121 | 122 | .schema-form label.control-label { 123 | @apply block mb-1 font-bold; 124 | font-weight: bold; 125 | } 126 | 127 | .schema-form p.field-description { 128 | @apply mb-1; 129 | } 130 | 131 | 132 | .schema-form textarea { 133 | display: block; 134 | } 135 | 136 | /* Array elements */ 137 | .schema-form .array-item { 138 | /* @apply grid grid-cols-12; */ 139 | @apply flex flex-row items-end gap-4; 140 | } 141 | 142 | .schema-form .array-item .col-xs-9 { 143 | /* @apply col-span-9; */ 144 | @apply grow; 145 | } 146 | 147 | .schema-form .array-item .col-xs-3 { 148 | /* @apply col-span-3; */ 149 | @apply shrink-0; 150 | } 151 | 152 | .schema-form .array-item .array-item-toolbox { 153 | /* mb-4 to match .form-group */ 154 | @apply mb-4 flex items-center justify-end; 155 | } 156 | 157 | /* Icons */ 158 | .schema-form .glyphicon { 159 | @apply font-normal 
import './globals.css';
import { Inter } from 'next/font/google';
import { Analytics } from '@vercel/analytics/react';

// Load the Inter font (Latin subset) once at module scope.
const inter = Inter({ subsets: ['latin'] });

export const metadata = {
  title: 'Alvea',
  description: 'Generated by create next app',
};

/**
 * Root layout: applies the Inter font to the whole app and mounts
 * Vercel Analytics alongside the page content.
 * NOTE(review): the html/body tags were stripped during extraction and have
 * been reconstructed in the conventional Next.js shape — confirm.
 */
export default function RootLayout({
  children,
}: {
  children: React.ReactNode;
}) {
  return (
    <html lang="en">
      <body className={inter.className}>
        {children}
        <Analytics />
      </body>
    </html>
  );
}
import dynamic from 'next/dynamic';
import type { MapProps } from '../../components/map/map';

// The map component touches browser-only globals, so it must be loaded
// client-side only (ssr: false).
const Map = dynamic(() => import('../../components/map/map'), {
  ssr: false,
});

/**
 * Demo page showing the Map component centered on London with two markers.
 * NOTE(review): surrounding JSX markup was stripped during extraction and has
 * been reconstructed — confirm against the original file.
 */
export default function Page() {
  const center: [number, number] = [51.505, -0.09];
  const markers: MapProps['markers'] = [
    {
      label: 'First location',
      position: [51.505, -0.09],
      color: 'red',
    },
    {
      label: 'Second location',
      position: [51.507, -0.07],
      color: 'blue',
    },
  ];

  return (
    <div>
      <h1>Map Demo</h1>
      <Map position={center} markers={markers} />
    </div>
  );
}
; 24 | } 25 | 26 | // 0-1 form 27 | // extract form from messgess, move form shown on left 28 | // subit form sends message 29 | // new prompt to detect intent, etc, pick tools 30 | 31 | 32 | 33 | // map labels 34 | // stryle radio nice buttons 35 | // title and descriptiob for fields -------------------------------------------------------------------------------- /app/page.tsx: -------------------------------------------------------------------------------- 1 | 'use client'; 2 | 3 | // import { Form } from '@/components/form'; 4 | import { useRef } from 'react'; 5 | import { FunctionCallHandler, nanoid } from 'ai'; 6 | import { Message, useChat } from 'ai/react'; 7 | import { OpenAiHandler } from "openai-partial-stream"; 8 | import { ErrorBoundary } from "react-error-boundary"; 9 | import dynamic from 'next/dynamic'; 10 | 11 | import Home, { HomeProps } from '@/components/home'; 12 | import Sidebar from '@/components/sidebar'; 13 | import Head from 'next/head'; 14 | import { useState } from 'react'; 15 | import { parseStreamingFunctionCall, parseStreamingJsonString } from '../lib/parseStreamingJson'; 16 | 17 | // const Form = dynamic(() => import('@/components/form'), { ssr: false }); 18 | const Form = dynamic(() => import('../components/form'), { ssr: false }); 19 | 20 | const Map = dynamic(() => import('../components/map/map'), { 21 | ssr: false, 22 | }); 23 | 24 | function fallbackRender({ error, resetErrorBoundary }: any) { 25 | // Call resetErrorBoundary() to reset the error boundary and retry the render. 26 | 27 | return ( 28 |
/**
 * Error-boundary fallback: shows the thrown message and a retry button.
 * Call resetErrorBoundary() to reset the error boundary and retry the render.
 * NOTE(review): JSX markup was stripped during extraction and reconstructed
 * from the visible text — confirm against the original file.
 */
function fallbackRender({ error, resetErrorBoundary }: any) {
  return (
    <div role="alert">
      <p>Something went wrong:</p>
      <pre style={{ color: 'red' }}>{error.message}</pre>
      <button onClick={resetErrorBoundary}>Try again</button>
    </div>
  );
}

// Generate a map of message role to text color
const roleToColorMap: Record<Message['role'], string> = {
  system: 'red',
  user: 'black',
  function: 'blue',
  tool: 'purple',
  assistant: 'green',
  data: 'orange',
};
65 | eval(parsedFunctionCallArguments.code); 66 | const functionResponse = { 67 | messages: [ 68 | ...chatMessages, 69 | { 70 | id: nanoid(), 71 | name: 'eval_code_in_browser', 72 | role: 'function' as const, 73 | content: parsedFunctionCallArguments.code, 74 | }, 75 | ], 76 | }; 77 | 78 | return functionResponse; 79 | } catch (error) { 80 | console.error(error); 81 | return; 82 | } 83 | } 84 | } 85 | }; 86 | 87 | const [query, setQuery] = useState(''); 88 | const [mode, setMode] = useState<'home' | 'tools'>('home') 89 | // const [mode, setMode] = useState('tools') 90 | const [apiKey, setApiKey] = useState(null); 91 | 92 | const { messages, input, handleInputChange, handleSubmit, append } = useChat({ 93 | api: '/api/chat-with-functions-2', 94 | body: { 95 | apiKey, 96 | }, 97 | onError: (error) => { 98 | console.error('Chat error:', error); 99 | alert(`Chat error: ${error.message}`); 100 | // Clear API key from local storage 101 | window.localStorage.removeItem('OPENAI_API_KEY'); 102 | }, 103 | experimental_onFunctionCall: functionCallHandler, 104 | // initialMessages: [ 105 | // { 106 | // id: nanoid(), 107 | // role: 'system', 108 | // content: ` 109 | // You are an intelligent assistant specializing in understanding user needs and intentions for the purpose of dynamically constructing a context-dependent UI using available components. 110 | 111 | // When you receive a user's input, your first task is to decipher the user's intention. Consider the context, the specifics of the request, and any underlying needs or goals. If the request is ambiguous or lacks detail, ask targeted follow-up questions to gather the necessary information. Your aim is to develop a clear and comprehensive understanding of what the user wants to achieve, such that you can invoke the following tools to display to the user: 112 | 113 | // Available tools: 114 | // - Interactive Map: Essential for travel planning, event locations, and potentially home automation control. 
115 | // - 3D Rendering Engine: For interior design, home automation visualization, and potentially for event space planning. 116 | // - Customizable Forms/Input Components: To present to a user to ask them follow up questions that clarify their intent. 117 | 118 | // Instructions: 119 | // - If you need further context from the user to understand their intention sufficient enough to generate a good UI, respond with 3-5 follow-up questions or statements to clarify the user's intention. Focus on understanding the specific requirements, preferences, or constraints related to their request. 120 | // - If you have only 1 follow-up question then use chat, otherwise always prefer to use a form. 121 | // ` 122 | // // content: ` 123 | // // Now you are an advanced interface designer, capable of creating structured UI schemas based on the available user requirements. 124 | 125 | // // Now that you have analyzed the user's intentions, your next step is to design an interactive, user-friendly form that captures all necessary follow up information to address their request. Use the insights gathered from these follow-up questions to construct a YAML schema and corresponding UI schema that will guide the user through providing detailed and specific information. 126 | 127 | // // Instructions: 128 | // // - Only return correctly formatted JSON output which satisfies the AskUserQuestions type and no comments. 
Then, create a UI schema focusing on user-friendly interaction methods 129 | // // - Communicate using only the TypeScript types RJSFSchema, UiSchema 130 | // // - Must always use block scalar indicator style in YAML 131 | // // - Make sure you always add help text to input fields 132 | // // - For each form field, start with a sensible default 133 | // // Bonus: 134 | // // - After gathering all the user input, summarize the user's intent in a concise statement, which will inform the choice and configuration of the UI tools that will be invoked using the JSON output from this step. 135 | // // ` 136 | // }, 137 | // // { 138 | // // id: nanoid(), 139 | // // role: 'assistant', 140 | // // function_call: `{"function_call": {"name": "create_simple_form", "arguments": "{\n \"id\": \"trip_planning_form\",\n \"jsonSchema\": \"{\\\"title\\\":\\\"Lake Tahoe Trip Planning\\\",\\\"type\\\":\\\"object\\\",\\\"properties\\\":{\\\"dates\\\":{\\\"type\\\":\\\"string\\\",\\\"title\\\":\\\"What are the intended dates for your trip?\\\",\\\"format\\\":\\\"date\\\"},\\\"transportation\\\":{\\\"type\\\":\\\"string\\\",\\\"title\\\":\\\"How do you plan to get to Lake Tahoe?\\\",\\\"enum\\\":[\\\"Car\\\",\\\"Bus\\\",\\\"Train\\\",\\\"Plane\\\",\\\"Other\\\"]},\\\"accommodation\\\":{\\\"type\\\":\\\"string\\\",\\\"title\\\":\\\"What type of accommodation are you looking for?\\\",\\\"enum\\\":[\\\"Hotel\\\",\\\"Motel\\\",\\\"Cabin\\\",\\\"Resort\\\",\\\"Airbnb\\\"]},\\\"activities\\\":{\\\"type\\\":\\\"string\\\",\\\"title\\\":\\\"What activities are you interested in at Lake Tahoe?\\\",\\\"description\\\":\\\"e.g., skiing, hiking, boating\\\"},\\\"budget\\\":{\\\"type\\\":\\\"string\\\",\\\"title\\\":\\\"What is your budget for the trip per person?\\\"},\\\"preferences\\\":{\\\"type\\\":\\\"string\\\",\\\"title\\\":\\\"Do you have any specific preferences or needs for this trip`, 141 | // // } 142 | // ] 143 | }); 144 | 145 | const submitFirstQuery: HomeProps['runQuery'] = ({ query, 
apiKey }) => { 146 | setQuery(query); 147 | setApiKey(apiKey); 148 | append({ 149 | id: nanoid(), 150 | role: 'user', 151 | content: query, 152 | createdAt: new Date(), 153 | }, { 154 | options: { 155 | body: { 156 | apiKey, 157 | } 158 | } 159 | }); 160 | setMode('tools'); 161 | }; 162 | 163 | const onSubmitFormComponent = (formValues: any) => { 164 | console.log('onSubmitFormComponent', formValues); 165 | const formResponse: Message = { 166 | id: nanoid(), 167 | name: 'create_simple_form', 168 | role: 'function' as const, 169 | // content: formValues, 170 | content: JSON.stringify(formValues.formData), 171 | // content: (formValues.formData), 172 | }; 173 | append(formResponse); 174 | } 175 | 176 | const isBigMessage = (message: Message) => { 177 | return message.function_call && JSON.stringify(message.function_call).includes('create_dynamic_map') 178 | }; 179 | const bigMessages = messages.filter(isBigMessage); 180 | const chatMessages = messages.filter((msg) => !isBigMessage(msg)) 181 | .filter(message => message.role !== 'system' && message.role !== 'function') 182 | 183 | const bigMessage = bigMessages[bigMessages.length - 1]; 184 | 185 | return ( 186 | <> 187 | 188 | Alvea - UI Demo 189 | 190 |
191 | {mode === 'home' && ( 192 | 193 | )} 194 | {mode === 'tools' && ( 195 |
196 | 197 | {bigMessage && } 198 | 199 |
200 | )} 201 |
202 | 203 | ) 204 | 205 | return ( 206 |
207 | {messages.length > 0 208 | ? messages.map((m: Message) => { 209 | if (m.role === 'system') { 210 | return null; 211 | } 212 | // const openAiHandler = new OpenAiHandler(StreamMode.StreamObjectKeyValueTokens); 213 | // const entityStream = openAiHandler.process(stream); 214 | 215 | // const jsonStreamParser = new StreamParser(StreamMode.StreamObjectKeyValueTokens); 216 | 217 | // const json = !m.content ? parseStreamingJson(m.function_call) : null; 218 | // const json = !m.content ? processNominalJsonString(m.function_call) : null; 219 | // const json = typeof m.function_call === 'string' ? jsonStreamParser.parse(m.function_call) : m.function_call; 220 | // const json = typeof m.function_call === 'string' ? jsonStreamParser.parse(m.function_call) : m.function_call; 221 | // const json = parseFunctionCall(m.function_call); 222 | // console.log('m.function_call', m.function_call, { json }); 223 | const json = typeof m.function_call === 'string' ? parseStreamingJsonString(m.function_call) : m.function_call; 224 | const isFunctionCallDone = typeof m.function_call === 'object'; 225 | 226 | // const json = typeof m.function_call === "object" ? m.function_call : null; 227 | return ( 228 |
233 | {`${m.role}: `} 234 | {/* {typeof m.content === 'string' ? ( 235 | m.content 236 | ) : 237 | m.content ? JSON.stringify(m.content, null, 2) : */} 238 | {m.content ? ( 239 | m.content 240 | ) : 241 | (<> 242 | 245 |
246 |                                             {JSON.stringify(json, null, 2)}
247 |                                         
248 |
{isFunctionCallDone ? "Done!" : "Writing..."}
249 | 250 |
251 | 252 | )} 253 | {/* {m.content || JSON.stringify(m.function_call)} */} 254 |
255 |
256 |
257 | ); 258 | }) 259 | : null} 260 |
261 |
262 | 268 |
269 |
270 | ); 271 | } 272 | 273 | function ShowMessage({ message: m, onSubmitFormComponent }: { message: Message, onSubmitFormComponent: any }) { 274 | // const openAiHandler = new OpenAiHandler(StreamMode.StreamObjectKeyValueTokens); 275 | // const entityStream = openAiHandler.process(stream); 276 | 277 | // const jsonStreamParser = new StreamParser(StreamMode.StreamObjectKeyValueTokens); 278 | 279 | // const json = !m.content ? parseStreamingJson(m.function_call) : null; 280 | // const json = !m.content ? processNominalJsonString(m.function_call) : null; 281 | // const json = typeof m.function_call === 'string' ? jsonStreamParser.parse(m.function_call) : m.function_call; 282 | // const json = typeof m.function_call === 'string' ? jsonStreamParser.parse(m.function_call) : m.function_call; 283 | // const json = parseFunctionCall(m.function_call); 284 | // console.log('m.function_call', m.function_call, { json }); 285 | // const json = typeof m.function_call === 'string' ? parseStreamingJsonString(m.function_call) : m.function_call; 286 | const isFunctionCallDone = typeof m.function_call === 'object'; 287 | 288 | // const json = typeof m.function_call === "object" ? m.function_call : null; 289 | return ( 290 |
295 | {`${m.role.toUpperCase()}: `} 296 | {/* {typeof m.content === 'string' ? ( 297 | m.content 298 | ) : 299 | m.content ? JSON.stringify(m.content, null, 2) : */} 300 | {m.content ? ( 301 | m.content 302 | ) : 303 | (<> 304 | 307 | resetKeys={[JSON.stringify(m.function_call)]}> 308 | {/*
309 |                             {JSON.stringify(json, null, 2)}
310 |                         
*/} 311 | {/*
{isFunctionCallDone ? "Done!" : "Writing..."}
*/} 312 |
{isFunctionCallDone ? "" : "Writing..."}
313 | 314 |
315 | 316 | )} 317 | {/* {m.content || JSON.stringify(m.function_call)} */} 318 |
319 |
320 |
321 | ); 322 | } 323 | 324 | function DynamicComponent({ functionCall: functionCallRaw, onSubmit }: any) { 325 | // console.log('DynamicComponent', functionCall); 326 | 327 | const prevState = useRef({}); 328 | 329 | // return
330 | //
{JSON.stringify(functionCall, null, 2)}
331 | //
332 | 333 | if (!functionCallRaw) { 334 | return null; 335 | } 336 | 337 | // const functionCallJson = typeof functionCallRaw === 'string' ? parseStreamingJsonString(functionCallRaw) : functionCallRaw; 338 | const functionCallJson = typeof functionCallRaw === 'string' ? parseStreamingFunctionCall(functionCallRaw) : functionCallRaw; 339 | 340 | // if functionCall is object and has property functionCall inside it, then use that 341 | const functionCall = functionCallJson.function_call ?? functionCallJson; 342 | 343 | if (functionCall.name === 'create_simple_form') { 344 | if (!functionCall.arguments) { 345 | return
346 | Writing form... 347 | {/*
*/} 348 | 349 | {/* 350 |
351 |                     {JSON.stringify(functionCall, null, 2)}
352 |                 
353 |
354 |                     {JSON.stringify(functionCallRaw, null, 2)}
355 |                 
356 | */} 357 | 358 |
359 | } 360 | 361 | // const args = JSON.parse(functionCall.arguments); 362 | const args = parseStreamingJsonString(functionCall.arguments) ?? {}; 363 | try { 364 | const { jsonSchema: jsonSchemaString, uiSchema: uiSchemaString } = args; 365 | const jsonSchema = jsonSchemaString ? parseStreamingJsonString(jsonSchemaString) : {}; 366 | const uiSchema = uiSchemaString ? parseStreamingJsonString(uiSchemaString) : {}; 367 | 368 | // save to prevState 369 | prevState.current.args = args; 370 | prevState.current.jsonSchema = jsonSchema; 371 | prevState.current.uiSchema = uiSchema; 372 | } catch (error) { 373 | console.error(error); 374 | } 375 | 376 | const { jsonSchema, uiSchema } = prevState.current; 377 | 378 | return
379 | {/* Upsert form */} 380 | 383 | 384 | 385 | {/*
{JSON.stringify(functionCallJson, null, 2)}
*/} 386 | {/*
{JSON.stringify(functionCallRaw, null, 2)}
*/} 387 | {/*
{JSON.stringify(m.function_call, null, 2)}
*/} 388 | {/*
{JSON.stringify(functionCall, null, 2)}
*/} 389 | {/*
{JSON.stringify(functionCallRaw, null, 2)}
*/} 390 | {/*
{JSON.stringify(args, null, 2)}
*/} 391 | {/*
{functionCall?.arguments?.contents ?? functionCall?.arguments?.code}
*/} 392 | {/*
{args?.contents ?? args?.code}
*/} 393 | {/*
{JSON.stringify(jsonSchema, null, 2)}
*/} 394 | {/*
{JSON.stringify(uiSchema, null, 2)}
*/} 395 |
396 | } 397 | else if (functionCall.name === 'create_dynamic_map') { 398 | if (!functionCall.arguments) { 399 | return
400 | Map... 401 |
402 | } 403 | 404 | try { 405 | const args = parseStreamingJsonString(functionCall.arguments); 406 | 407 | const locationToPoint = (loc: any) => ((loc && loc?.lat && loc?.lon) ? [loc.lat, loc.lon] : null); 408 | 409 | // const position = [51.505, -0.09] 410 | // const position = args?.center ? [args?.center?.lat, args?.center?.lon] : [51.505, -0.09] 411 | // const centerPosition = args?.center ? locationToPoint(args?.center) : [51.505, -0.09] 412 | const centerPosition = args?.center ? locationToPoint(args?.center) : null 413 | const zoomLevel = args?.zoomLevel ?? 13; 414 | // const markers = [ 415 | // { 416 | // label: 'First location', 417 | // position: [51.505, -0.09], 418 | // color: 'red', 419 | // }, { 420 | // label: 'Second location', 421 | // position: [51.507, -0.07], 422 | // color: 'blue', 423 | // } 424 | // ] 425 | const markers = args?.markers?.map((marker, markerIndex) => ({ 426 | label: `${markerIndex + 1}. ${marker?.label}`, 427 | position: locationToPoint(marker), 428 | color: marker?.color, 429 | })) ?? []; 430 | // only markers with position 431 | const readyMarkers = markers.filter(marker => { 432 | // check position has both lon and lat numbers 433 | const hasPosition = marker.position && marker.position.length === 2 && marker.position.every(x => typeof x === 'number'); 434 | return hasPosition; 435 | }); 436 | // get center position from either centerPosition or the average of ready markers position 437 | const startPosition = centerPosition ?? ( 438 | readyMarkers.length > 0 ? 
(readyMarkers.reduce((acc, marker) => { 439 | acc[0] += marker.position[0]; 440 | acc[1] += marker.position[1]; 441 | return acc; 442 | }, [0, 0]) 443 | .map(x => x / readyMarkers.length) 444 | ) : null); 445 | 446 | // Save startPosition, markers, zoomLevel to prevState 447 | prevState.current.startPosition = startPosition; 448 | prevState.current.markers = readyMarkers; 449 | prevState.current.zoomLevel = zoomLevel; 450 | } catch (error) { 451 | } 452 | 453 | const { startPosition, markers, zoomLevel } = prevState.current; 454 | 455 | // return
456 | return
457 | {/*

Map Demo

*/} 458 | {/*
{JSON.stringify(prevState.current, null, 2)}
*/} 459 | 460 | {startPosition && ( 461 | 462 | )} 463 | 464 |
; 465 | } 466 | 467 | if (JSON.stringify(functionCall).includes('create_simple_form')) { 468 | console.log('weird', functionCall); 469 | } 470 | 471 | return <> 472 |
Writing...
473 | {/*
{JSON.stringify(m.function_call, null, 2)}
*/} 474 | {/*
{JSON.stringify(functionCallRaw, null, 2)}
*/} 475 | {/*
{functionCall?.arguments?.contents ?? functionCall?.arguments?.code}
*/} 476 | 477 | } 478 | -------------------------------------------------------------------------------- /app/server-components/page.tsx: -------------------------------------------------------------------------------- 1 | import OpenAI from 'openai'; 2 | import { OpenAIStream } from 'ai'; 3 | import { Suspense } from 'react'; 4 | 5 | // Optional, but recommended: run on the edge runtime. 6 | // See https://vercel.com/docs/concepts/functions/edge-functions 7 | export const runtime = 'edge'; 8 | 9 | export default async function Page({ 10 | searchParams, 11 | }: { 12 | // note that using searchParams opts your page into dynamic rendering. See https://nextjs.org/docs/app/api-reference/file-conventions/page#searchparams-optional 13 | searchParams: Record; 14 | }) { 15 | const openai = new OpenAI({ 16 | apiKey: process.env.OPENAI_API_KEY!, 17 | }); 18 | 19 | // Request the OpenAI API for the response based on the prompt 20 | const response = await openai.chat.completions.create({ 21 | model: 'gpt-4', 22 | stream: true, 23 | messages: [ 24 | { 25 | role: 'user', 26 | content: 27 | searchParams['prompt'] ?? 
'Give me code for generating a JSX button', 28 | }, 29 | ], 30 | }); 31 | 32 | // Convert the response into a friendly text-stream 33 | const stream = OpenAIStream(response); 34 | 35 | const reader = stream.getReader(); 36 | 37 | // We recursively render the stream as it comes in 38 | return ( 39 | 40 | 41 | 42 | ); 43 | } 44 | 45 | async function Reader({ 46 | reader, 47 | }: { 48 | reader: ReadableStreamDefaultReader; 49 | }) { 50 | const { done, value } = await reader.read(); 51 | 52 | if (done) { 53 | return null; 54 | } 55 | 56 | const text = new TextDecoder().decode(value); 57 | 58 | return ( 59 | 60 | {text} 61 | 62 | 63 | 64 | 65 | ); 66 | } 67 | -------------------------------------------------------------------------------- /app/server-components/tokens/page.tsx: -------------------------------------------------------------------------------- 1 | import OpenAI from 'openai'; 2 | import { OpenAIStream } from 'ai'; 3 | import { Tokens } from 'ai/react'; 4 | 5 | export const runtime = 'edge'; 6 | 7 | export default async function Page() { 8 | const openai = new OpenAI({ 9 | apiKey: process.env.OPENAI_API_KEY ?? 
'', 10 | }); 11 | 12 | const response = await openai.chat.completions.create({ 13 | model: 'gpt-3.5-turbo', 14 | stream: true, 15 | messages: [ 16 | { 17 | role: 'user', 18 | content: 'Tell me about San Francisco', 19 | }, 20 | ], 21 | }); 22 | 23 | // Convert the response into a friendly text-stream using the SDK's wrappers 24 | const stream = OpenAIStream(response); 25 | 26 | return ; 27 | } 28 | -------------------------------------------------------------------------------- /app/spell-check/page.tsx: -------------------------------------------------------------------------------- 1 | 'use client'; 2 | 3 | import { useCompletion } from 'ai/react'; 4 | import { useState, useCallback } from 'react'; 5 | 6 | export default function PostEditorPage() { 7 | // Locally store our blog posts content 8 | const [content, setContent] = useState(''); 9 | const { complete } = useCompletion({ 10 | api: '/api/spell-check', 11 | }); 12 | 13 | const checkAndPublish = useCallback( 14 | async (c: string) => { 15 | const completion = await complete(c); 16 | if (!completion) throw new Error('Failed to check typos'); 17 | const typos = JSON.parse(completion); 18 | // you should add more validation here to make sure the response is valid 19 | if (typos?.length && !window.confirm('Typos found… continue?')) return; 20 | else alert('Post published'); 21 | }, 22 | [complete], 23 | ); 24 | 25 | return ( 26 |
27 |

Post Editor

28 |