├── .gitignore ├── .nvmrc ├── LICENSE ├── README.md ├── index.js ├── package-lock.json ├── package.json └── src ├── components ├── Ragdoll.js ├── RagdollCommandLine.js └── index.js ├── index.js └── utils ├── extraction.js ├── output.js ├── prefix.js ├── storage.js └── strings.js /.gitignore: -------------------------------------------------------------------------------- 1 | # directories 2 | /node_modules 3 | /.tmp 4 | 5 | # env 6 | .env 7 | -------------------------------------------------------------------------------- /.nvmrc: -------------------------------------------------------------------------------- 1 | 20.11.1 -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) Benny Schmidt 2024 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Ragdoll 2 | 3 | The library for character-driven AI experiences. 4 | 5 | Deploy AI personas for a variety of [use cases](https://github.com/bennyschmidt/ragdoll-studio/blob/master/CaseStudies.md), each with their own distinct knowledge and style. 6 | 7 | ![311941658-6b93b041-f30f-4121-a951-a746a19c75fc](https://github.com/bennyschmidt/ragdoll/assets/45407493/05231ee1-9a40-436f-88a1-dd5b5ec73a1a) 8 | 9 | *Arthas Menethil, World of Warcraft* 10 | 11 | ----- 12 | 13 | ## Benefits 14 | 15 | - **Scoped Knowledge**: Using a generic chatbot like ChatGPT for narrow use cases like customer support, a game NPC, or writing code can yield undesired responses, or provide information outside the intended scope of knowledge. You don't want your MMORPG shopkeeper talking about Ford F-150s or Chick-Fil-A, do you? Ragdoll scrapes a URL you provide as a knowledge source (usually a wiki-style web page, but it could be anything - it's very flexible), and uses [llamaindex](https://github.com/run-llama/LlamaIndexTS) to store and index that knowledge. It handles questions that fall outside of the scope of knowledge gracefully, so it will still feel like the user is interacting with a person even when it doesn't know the answer. 16 | 17 | - **Distinct Personalities**: Answers to questions are always rephrased from the first-person perspective in the style of a persona that you define.
Because you're asked to define things like prose, tone, and even art style, Ragdoll is able to generate the appropriate prompts for your persona, resulting in statements the target persona would believably say. 18 | 19 | - **Extensible**: Ragdoll can be run [as an API](https://github.com/bennyschmidt/ragdoll-studio/tree/master/ragdoll-api), in [a React app](https://github.com/bennyschmidt/ragdoll-studio/tree/master/ragdoll-react), as [a CLI](https://github.com/bennyschmidt/ragdoll/blob/master/src/index.js), or as [a dependency](https://www.npmjs.com/package/ragdoll-core) in your application. It uses [Ollama](https://github.com/run-llama/LlamaIndexTS/blob/main/packages/core/src/llm/ollama.ts) for text so you can choose from a [wide range of models](https://ollama.com/library), and defaults to [Stable Diffusion](https://github.com/AUTOMATIC1111/stable-diffusion-webui) (txt2img) for images. 20 | 21 | ----- 22 | 23 | ### Retrieval-Augmented Generation 24 | 25 | Retrieval-Augmented Generation ([RAG](https://arxiv.org/pdf/2005.11401.pdf)) is a strategy that helps address both LLM hallucinations and out-of-date or off-topic training data. The following diagram shows how data flows into Ragdoll from documents (web pages) and LLM-generated content through an indexed store and query engine before being presented: 26 | 27 | ![diagram](https://github.com/bennyschmidt/ragdoll/assets/45407493/6e730da7-8708-4f20-b4c1-3e7eecdf5061) 28 | 29 | ----- 30 | 31 | ## Web app 32 | 33 | You can interact with Ragdoll via this [Node/React full stack application](https://github.com/bennyschmidt/ragdoll-studio). 34 | 35 | ----- 36 | 37 | ## CLI examples 38 | 39 | ### Image quality & GUI 40 | 41 | Note that in a default Terminal you will not see text colors and the image quality will be diminished. Using a Terminal like [iTerm2](https://iterm2.com) or [Kitty](https://sw.kovidgoyal.net/kitty) will allow you to view the full resolution (512x512 by default). 42 | 43 | _In native Terminal with no add-ons:_ 44 | 45 | > Question: "what town are you from" 46 | > 47 | > Answer: 48 | > 49 | > ![312186339-4cc0aa1c-1592-425c-9ed3-59a5605d705b](https://github.com/bennyschmidt/ragdoll/assets/45407493/89a4858b-2b70-4ab1-bfca-92da2039d20b) 50 | 51 | _In verbose mode with caching and high-res image support:_ 52 | 53 | > Question: "why are you so mean" 54 | > 55 | > Answer: 56 | > 57 | > ![312192889-97a1dbc1-0669-4f43-8067-34cc99938449](https://github.com/bennyschmidt/ragdoll/assets/45407493/eb226377-f63b-40b0-b258-d00a12af46c8) 58 | 59 | _In verbose mode when he doesn't know the answer based on the knowledge he has:_ 60 | 61 | > Question: what is your favorite memory 62 | 63 | For this one, llamaindex could not find any relevant info, resulting in this prompt fragment: 64 | 65 | > "Arthas's favorite memory is not explicitly mentioned in the context information provided." 66 | 67 | Yet the prompt is still robust enough to provide a meaningful response in the style of Arthas: 68 | 69 | > "In the realm of my existence, a cherished memory lies concealed, veiled by the shadows of time. Its essence, though unspoken, resonates within my being. A tale of valor and darkness, woven intricately in the tapestry of my soul." 70 | 71 | And we still get a relevant image: 72 | 73 | > ![312196072-f0304218-366f-43a9-8208-77543e486781](https://github.com/bennyschmidt/ragdoll/assets/45407493/6b348768-4f6a-4505-a360-d74e2c4f0154) 74 | 75 | ----- 76 | 77 | ## Usage 78 | 79 | Set up the environment. No API keys needed!
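If you plan to use Ragdoll as a dependency rather than running this repo directly, it's published on npm as [`ragdoll-core`](https://www.npmjs.com/package/ragdoll-core) (`npm i ragdoll-core`). A minimal sketch of pulling it in - the package's root export is the `Ragdoll` factory, and the full config object is described in the Persona configuration section below:

```javascript
// Minimal sketch: assumes ragdoll-core is installed and a .env file
// (like the scaffold below) exists at the project root
const Ragdoll = require('ragdoll-core');
```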
80 | 81 | ### .env scaffold 82 | 83 | ``` 84 | TEXT_MODEL_PROVIDER=LlamaIndex 85 | TEXT_MODEL_URI=http://localhost:11434 86 | TEXT_TEXT_MODEL=mistral 87 | IMAGE_MODEL_PROVIDER=Stable Diffusion 88 | IMAGE_MODEL_URI=http://localhost:7860 89 | TEXT_IMAGE_MODEL=txt2img 90 | IMAGE_IMAGE_MODEL=img2img 91 | IMAGE_CFG_SCALE=8 92 | IMAGE_CFG_SCALE_TRUE=24 93 | IMAGE_DENOISING_STRENGTH=0.8 94 | IMAGE_DENOISING_STRENGTH_TRUE=0.56 95 | IMAGE_BATCH_SIZE=2 96 | DELAY=200 97 | RENDER=true 98 | VERBOSE=true 99 | GREETING=false 100 | CACHE=true 101 | MAX_STORAGE_KEY_LENGTH=32 102 | LOG_PREFIX= 103 | STORAGE_URI=./.tmp 104 | ``` 105 | 106 | ----- 107 | 108 | ### Install Ollama 109 | 110 | 1. Download Ollama 111 | 112 | **Linux**: `curl -fsSL https://ollama.com/install.sh | sh` 113 | 114 | **Windows & Mac**: [ollama.com/download](https://ollama.com/download) 115 | 116 | 2. Run the CLI 117 | 118 | `ollama start` 119 | 120 | 3. Find a model you like [here](https://ollama.com/library) and run it in your Terminal: 121 | 122 | `ollama run mistral` 123 | 124 | The Ollama (Mistral) API is now listening on `http://localhost:11434/` 125 | 126 | ----- 127 | 128 | ### Install Stable Diffusion 129 | 130 | 1. Make sure *Python 3* is installed 131 | 132 | 2. Navigate to the desired directory and run 133 | 134 | `git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui.git` 135 | 136 | 3. Run the web UI 137 | 138 | **Linux & Mac**: Run `./webui.sh --api --xformers --lowvram`. 139 | 140 | **Windows**: Run `./webui-user.bat --api --lowvram` from Windows Explorer as a normal, non-administrator user. 141 | 142 | Note: `--lowvram` is an optional flag; if you're running on a machine with 16GB+ VRAM, you can omit it. 143 | 144 | The Stable Diffusion API is now listening on `http://localhost:7860/` 145 | 146 | ----- 147 | 148 | ### Run Ragdoll 149 | 150 | `npm start` 151 | 152 | Ragdoll is now running in your terminal. 153 | 154 | ----- 155 | 156 | ## Important environment variables 157 | 158 | `TEXT_TEXT_MODEL` 159 | 160 | The name of the text-to-text model you want to use (this should be running on `http://localhost:11434`). Example: `mistral`. 161 | 162 | `IMAGE_MODEL_URI` 163 | 164 | Example: `http://localhost:7860`. 165 | 166 | `DELAY` 167 | 168 | Delay between requests (in ms), for rate limiting, artificial delays, etc. 169 | 170 | `VERBOSE` 171 | 172 | Set to `true` to show all logs, including the generated prompts, in your console. For example, in this case the query was `"how many blood elves have you killed?"`: 173 | 174 | ``` 175 | Text (mistral) Prompt: Re-write the following message in the first-person, as if you are Arthas, in a style that is inspiring but grim, from the year 1200 A.D., using as few characters as possible (never exceed 500), in a tone that is slightly resentful, omitting any references to Earth or real-world society: Arthas killed Sylvanas Windrunner, King Anasterian Sunstrider, and Dar'Khan Drathir, who were blood elves. So, Arthas has killed three blood elves. 176 | Text (mistral) responded with "I, Arthas, vanquished Sylvanas Windrunner, King Anasterian Sunstrider, and Dar'Khan Drathir, noble blood elves. Three lives claimed by my hand.". 177 | Waiting 2 seconds...
178 | Image (txt2img) Prompt: Render the following in the style of Blizzard's World of Warcraft concept art in high resolution like a finely-tuned video game model including each detail and anatomically correct features (if any): I, Arthas, vanquished Sylvanas Windrunner, King Anasterian Sunstrider, and Dar'Khan Drathir, noble blood elves. Three lives claimed by my hand. 179 | ``` 180 | 181 | `CACHE` 182 | 183 | Set to `true` to cache inputs, llamaindex queries, LLM prompts, responses, & images. 184 | 185 | The transformed input/prompt is what's cached, not the literal user input. For example, the questions "who are you", "explain who you are", and "who is arthas?" all transform to the same query ("Who is Arthas?"). The LLM responses are cached too, so you'll get the same answer when asking similar questions (but without having to request the LLM again). 186 | 187 | `MAX_STORAGE_KEY_LENGTH` 188 | 189 | The maximum length of a storage key. Keys are derived from queries/prompts, but there are key/value limits in `localStorage` and some prompts can be very long. An alternative to this config would be to make the developer provide a `key` (similar to React) each time `remember` is called, but that isn't supported right now. 190 | 191 | `STORAGE_URI` 192 | 193 | Path to a temp folder used for cache (default is `./.tmp`). 194 | 195 | ----- 196 | 197 | ### Persona configuration 198 | 199 | Pass this config object to `Ragdoll` when you instantiate a new persona. 200 | 201 | ```javascript 202 | const myPersona = await Ragdoll({ 203 | cache, 204 | greeting, 205 | knowledgeURI, 206 | name, 207 | artStyle, 208 | writingStyle, 209 | writingTone, 210 | query 211 | }); 212 | 213 | // If there's a greeting or initial query, 214 | // an answer is already available on the returned object 215 | 216 | const { text: textAnswer } = myPersona; 217 | 218 | console.log(textAnswer); 219 | 220 | // Now you can chat freely using 221 | // the `chat` method 222 | 223 | const { text: textAnswer2 } = await myPersona.chat('who are you?'); 224 | 225 | console.log(textAnswer2); 226 | 227 | ``` 228 | 229 | ----- 230 | 231 | ### Model support: Local LLMs that run on your machine 232 | 233 | #### Text-to-text models 234 | 235 | - Llama2 Chat LLMs (70B, 13B, and 7B parameters) 236 | - MistralAI Chat LLMs 237 | - Fireworks Chat LLMs 238 | 239 | #### Text-to-image models 240 | 241 | - Stable Diffusion txt2img 242 | 243 | #### Image-to-image models 244 | 245 | - Stable Diffusion img2img 246 | 247 | #### Image-to-video models 248 | 249 | *Support planned for image-to-animation, image-to-cgi, and image-to-film models.* 250 | 251 | #### Audio-to-audio models 252 | 253 | *Support planned for audio-to-music and audio-to-sfx models.* 254 | 255 | #### Text-to-code models 256 | 257 | *Support planned for a text-to-code model.* 258 | 259 | ## Middleware 260 | 261 | To ensure integrity, you can optionally integrate lifecycle middleware at two stages: 262 | 1. LLM query: Run the formatted prompt through another transformer (like OpenAI GPT-4) 263 | 2. 
Transformed response: Run the final image prompt through a different image model (like Leonardo Kino XL) 264 | 265 | _Instructions coming soon._ 266 | -------------------------------------------------------------------------------- /index.js: -------------------------------------------------------------------------------- 1 | // Library export 2 | 3 | const { Ragdoll } = require('./src/components/Ragdoll.js'); 4 | 5 | module.exports = Ragdoll; 6 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "ragdoll-core", 3 | "version": "1.4.2", 4 | "main": "index.js", 5 | "dependencies": { 6 | "dotenv": "^16.4.5", 7 | "llamaindex": "^0.2.3", 8 | "node-localstorage": "^3.0.5", 9 | "terminal-image": "^2.0.0", 10 | "textract": "^2.5.0" 11 | }, 12 | "scripts": { 13 | "start": "node src/index", 14 | "test": "echo \"Error: no test specified\" && exit 1" 15 | }, 16 | "author": "", 17 | "license": "ISC", 18 | "description": "" 19 | } 20 | -------------------------------------------------------------------------------- /src/components/Ragdoll.js: -------------------------------------------------------------------------------- 1 | 2 | const dotenv = require('dotenv'); 3 | 4 | const { 5 | Document, 6 | VectorStoreIndex, 7 | OllamaEmbedding, 8 | Ollama, 9 | PromptHelper, 10 | SimpleNodeParser, 11 | CallbackManager 12 | } = require('llamaindex'); 13 | 14 | // Storage utils 15 | 16 | const { 17 | remember, 18 | recall, 19 | forget 20 | } = require('../utils/storage.js'); 21 | 22 | // Output utils 23 | 24 | const { 25 | IMAGE_SIZE, 26 | IMAGE_BATCH_SIZE, 27 | isRendered, 28 | isVerbose, 29 | log, 30 | delay 31 | } = require('../utils/output.js'); 32 | 33 | const { extractFromURL } = require('../utils/extraction.js'); 34 | 35 | // Human-readable strings 36 | 37 | const { 38 | LOADED_CACHED_QUERY, 39 | LOADED_CACHED_TEXT_RESPONSE, 40 | LOADED_CACHED_KNOWLEDGE, 41 | CACHE_CLEARED, 42 | PREPARING_RESPONSE, 43 | PREPARING_DISPLAY, 44 | CREATING_VECTOR_STORE, 45 | CREATING_QUERY_ENGINE, 46 | STARTING, 47 | DONE, 48 | DEFAULT_NAME, 49 | DEFAULT_KNOWLEDGE_URI, 50 | DEFAULT_WRITING_STYLE, 51 | CONFIG_ERROR, 52 | CONFIG_ERROR_KNOWLEDGE_URI, 53 | CONFIG_ERROR_NAME, 54 | CONFIG_ERROR_WRITING_STYLE, 55 | CONFIG_ERROR_QUERY, 56 | TEXT_TEXT_MODEL, 57 | llmLogPrefix, 58 | textTextModel, 59 | textModelLogPrefix, 60 | imageModelError, 61 | waiting 62 | } = require('../utils/strings.js'); 63 | 64 | const { 65 | prefixOutputText, 66 | prefixOutputImage 67 | } = require('../utils/prefix.js'); 68 | 69 | // Persona configs 70 | 71 | dotenv.config(); 72 | 73 | const { 74 | IMAGE_MODEL_URI, 75 | // IMAGE_CFG_SCALE, 76 | // IMAGE_DENOISING_STRENGTH, 77 | IMAGE_CFG_SCALE_TRUE, 78 | IMAGE_DENOISING_STRENGTH_TRUE, 79 | DELAY 80 | } = process.env; 81 | 82 | /* * * * * * * * * * * * * * * * * * * * 83 | * * 84 | * Ragdoll * 85 | * * 86 | * Manages state of knowledge * 87 | * and responses. 
* 88 | * * 89 | * config: RagdollConfig * 90 | * * 91 | * * * * * * * * * * * * * * * * * * * */ 92 | 93 | const Ragdoll = async config => { 94 | if (!config) { 95 | log(CONFIG_ERROR); 96 | 97 | return; 98 | } 99 | 100 | let { 101 | cache = true, 102 | greeting = false, 103 | knowledgeURI = DEFAULT_KNOWLEDGE_URI, 104 | additionalKnowledgeURIs = [], 105 | name = DEFAULT_NAME, 106 | artStyle = '', 107 | writingStyle = DEFAULT_WRITING_STYLE, 108 | query, 109 | imageSrc = '' 110 | } = config; 111 | 112 | if (!knowledgeURI) { 113 | log(CONFIG_ERROR_KNOWLEDGE_URI); 114 | 115 | return; 116 | } 117 | 118 | if (!name) { 119 | log(CONFIG_ERROR_NAME); 120 | 121 | return; 122 | } 123 | 124 | if (!writingStyle) { 125 | log(CONFIG_ERROR_WRITING_STYLE); 126 | 127 | return; 128 | } 129 | 130 | if (!query && greeting) { 131 | query = greeting; 132 | } 133 | 134 | if (!query) { 135 | log(CONFIG_ERROR_QUERY); 136 | 137 | return; 138 | } 139 | 140 | // Prefix output prompt (text) 141 | 142 | const ragdollPromptPrefix = prefixOutputText(config); 143 | 144 | // Prefix output prompt (image) 145 | 146 | const imagePromptPrefix = prefixOutputImage(config); 147 | 148 | const { default: terminalImage } = await import('terminal-image'); 149 | 150 | let queryResponse; 151 | 152 | // Clear cache 153 | 154 | if (cache === false) { 155 | isCacheEnabled = false; 156 | 157 | if (isVerbose) { 158 | log(CACHE_CLEARED); 159 | } 160 | 161 | forget(); 162 | } 163 | 164 | /* * * * * * * * * * * * * * * * * * * * 165 | * * 166 | * createIndex * 167 | * * 168 | * Create a document from fetched * 169 | * text data and add an indexed * 170 | * store for library access. * 171 | * * 172 | * text: string * 173 | * * 174 | * * * * * * * * * * * * * * * * * * * */ 175 | 176 | let queryEngine; 177 | 178 | const createIndex = async text => { 179 | // Create index and query engine 180 | 181 | const document = new Document({ text }); 182 | 183 | if (isVerbose) { 184 | log(CREATING_VECTOR_STORE); 185 | } 186 | 187 | const index = await VectorStoreIndex.fromDocuments( 188 | [document], 189 | { 190 | serviceContext: { 191 | llm: new Ollama({ 192 | model: TEXT_TEXT_MODEL 193 | }), 194 | embedModel: new OllamaEmbedding({ 195 | model: TEXT_TEXT_MODEL 196 | }), 197 | promptHelper: new PromptHelper(), 198 | nodeParser: new SimpleNodeParser(), 199 | callbackManager: new CallbackManager() 200 | } 201 | } 202 | ); 203 | 204 | if (isVerbose) { 205 | log(DONE); 206 | log(waiting); 207 | } 208 | 209 | await delay(DELAY); 210 | 211 | if (isVerbose) { 212 | log(CREATING_QUERY_ENGINE); 213 | } 214 | 215 | queryEngine = index.asQueryEngine(); 216 | }; 217 | 218 | /* * * * * * * * * * * * * * * * * * * * 219 | * * 220 | * createQuery * 221 | * * 222 | * Run and cache the user's query * 223 | * to get the core of the prompt. 
* 224 | * * 225 | * * * * * * * * * * * * * * * * * * * */ 226 | 227 | const createQuery = async () => { 228 | const queryCache = recall(query); 229 | 230 | if (queryCache) { 231 | if (isVerbose) { 232 | log(LOADED_CACHED_QUERY); 233 | } 234 | 235 | queryResponse = queryCache; 236 | } else { 237 | if (isVerbose) { 238 | log(`${llmLogPrefix} ${query}`); 239 | } 240 | 241 | const { response } = await queryEngine.query({ 242 | query 243 | }); 244 | 245 | queryResponse = response; 246 | 247 | remember(query, queryResponse); 248 | } 249 | 250 | if (isVerbose) { 251 | log(DONE); 252 | log(waiting); 253 | } 254 | 255 | await delay(DELAY); 256 | }; 257 | 258 | /* * * * * * * * * * * * * * * * * * * * 259 | * * 260 | * invokeChatAgent * 261 | * * 262 | * Complete the prompt by decorating * 263 | * it in the defined style and send to * 264 | * the text model. * 265 | * * 266 | * * * * * * * * * * * * * * * * * * * */ 267 | 268 | let message; 269 | let messageResponse; 270 | let queryString; 271 | 272 | const invokeChatAgent = async () => { 273 | const chatAgent = new Ollama({ 274 | model: TEXT_TEXT_MODEL 275 | }); 276 | 277 | queryString = queryResponse.toString(); 278 | 279 | // Create prompt to answer in the defined style 280 | 281 | message = `${ragdollPromptPrefix} ${queryString}`; 282 | 283 | const messageCache = recall(queryString); 284 | 285 | if (messageCache) { 286 | if (isVerbose) { 287 | log(LOADED_CACHED_TEXT_RESPONSE); 288 | } 289 | 290 | messageResponse = messageCache; 291 | } else { 292 | if (isVerbose) { 293 | log(`${textModelLogPrefix} ${message}`); 294 | } 295 | 296 | try { 297 | const { message: textModelResponse } = await chatAgent.chat({ 298 | model: TEXT_TEXT_MODEL, 299 | messages: [ 300 | { 301 | role: 'user', 302 | content: message 303 | } 304 | ] 305 | }); 306 | 307 | messageResponse = textModelResponse?.content; 308 | 309 | remember(queryString, messageResponse); 310 | } catch (error) { 311 | log(`${textTextModel} error: ${error?.message}`); 312 | messageResponse = error?.message; 313 | } 314 | 315 | if (isVerbose) { 316 | log(`${textTextModel} responded with "${messageResponse}".`); 317 | log(waiting); 318 | } 319 | 320 | await delay(DELAY); 321 | } 322 | }; 323 | 324 | /* * * * * * * * * * * * * * * * * * * * 325 | * * 326 | * invokeImageAgent * 327 | * * 328 | * With the text response now in * 329 | * first-person from the persona, send * 330 | * prompt the image model to get an * 331 | * image that corresponds with the * 332 | * text. * 333 | * * 334 | * * * * * * * * * * * * * * * * * * * */ 335 | 336 | // Create prompt to render an image in the defined style 337 | 338 | let imgResponse; 339 | let imgResponse2; 340 | 341 | const invokeImageAgent = async ({ src }) => { 342 | const endpoint = src ? 
'img2img' : 'txt2img'; 343 | 344 | const imageModelPrompt = `${imagePromptPrefix} ${messageResponse || query}`; 345 | 346 | if (isVerbose) { 347 | log(`${endpoint} ${imageModelPrompt}`); 348 | } 349 | 350 | try { 351 | const imageModelResponse = await fetch(`${IMAGE_MODEL_URI}/sdapi/v1/${endpoint}`, { 352 | method: 'POST', 353 | headers: { 354 | 'Accept': 'application/json', 355 | 'Content-Type': 'application/json' 356 | }, 357 | body: JSON.stringify({ 358 | "prompt": imageModelPrompt, 359 | "width": IMAGE_SIZE, 360 | "height": IMAGE_SIZE, 361 | "batch_size": IMAGE_BATCH_SIZE, 362 | "n_iter": 1, 363 | // cfg_scale: IMAGE_CFG_SCALE, 364 | // denoising_strength: IMAGE_DENOISING_STRENGTH, 365 | "cfg_scale": parseFloat(IMAGE_CFG_SCALE_TRUE), 366 | "denoising_strength": parseFloat(IMAGE_DENOISING_STRENGTH_TRUE), 367 | "include_init_images": true, 368 | "script_args": [], 369 | "send_images": true, 370 | "alwayson_scripts": {}, 371 | 372 | ...(src ? { "init_images": [src] } : {}) 373 | }) 374 | }); 375 | 376 | if (imageModelResponse?.ok) { 377 | const result = await imageModelResponse.json(); 378 | 379 | if (result?.images) { 380 | imgResponse = `data:image/png;base64,${result.images[0]}`; 381 | } 382 | 383 | // Assuming batch size is fixed 384 | // at 2 for now 385 | 386 | if (src) { 387 | imgResponse2 = `data:image/png;base64,${result.images[1]}`; 388 | } 389 | } 390 | } catch (error) { 391 | log(`${endpoint} error: ${error?.message}`); 392 | 393 | imgResponse = null; 394 | imgResponse2 = null; 395 | } 396 | 397 | if (isVerbose && imgResponse) { 398 | log(`${endpoint} responded with "${imgResponse.slice(0, 64)}..."${imgResponse2 ? ` and ${imgResponse2.slice(0, 64)}...` : ''}.`); 399 | } 400 | }; 401 | 402 | /* * * * * * * * * * * * * * * * * * * * 403 | * * 404 | * respond * 405 | * * 406 | * Lifecycle method called when * 407 | * the agent has stored new data * 408 | * and should respond with text * 409 | * and an image. * 410 | * * 411 | * error?: any * 412 | * text: string * 413 | * * 414 | * * * * * * * * * * * * * * * * * * * */ 415 | 416 | const respond = async (error, text) => { 417 | if (isVerbose) { 418 | log(PREPARING_RESPONSE); 419 | } 420 | 421 | if (error) { 422 | log(error); 423 | 424 | return; 425 | } 426 | 427 | remember(knowledgeURI, text); 428 | 429 | // Create and render the response 430 | 431 | if (imageSrc) { 432 | messageResponse = ''; 433 | } else { 434 | await createQuery(); 435 | 436 | await invokeChatAgent(); 437 | } 438 | 439 | if (artStyle) { 440 | await invokeImageAgent({ 441 | src: imageSrc 442 | }); 443 | } 444 | 445 | return render(); 446 | }; 447 | 448 | /* * * * * * * * * * * * * * * * * * * * 449 | * * 450 | * chat * 451 | * * 452 | * Pass additional queries to an * 453 | * instantiated Ragdoll. * 454 | * * 455 | * input: string * 456 | * * 457 | * * * * * * * * * * * * * * * * * * * */ 458 | 459 | const chat = async input => { 460 | const knowledgeCache = recall(knowledgeURI); 461 | 462 | query = input; 463 | 464 | queryResponse = await queryEngine.query({ 465 | query 466 | }); 467 | 468 | return respond(null, knowledgeCache); 469 | }; 470 | 471 | /* * * * * * * * * * * * * * * * * * * * 472 | * * 473 | * render * 474 | * * 475 | * Return a "Persona Reply" of * 476 | * { image, text } for display. 
* 477 | * * 478 | * * * * * * * * * * * * * * * * * * * */ 479 | 480 | const render = async () => { 481 | if (isVerbose) { 482 | log(PREPARING_DISPLAY); 483 | } 484 | 485 | if (!imgResponse) { 486 | if (isVerbose && artStyle) { 487 | log(imageModelError); 488 | } 489 | 490 | if (messageResponse) { 491 | console.log(`%c${messageResponse}`, 'color: dodgerblue'); 492 | } 493 | 494 | return { 495 | text: messageResponse 496 | }; 497 | } 498 | 499 | const image = await fetch(imgResponse); 500 | 501 | const buffer = Buffer.from(await image.arrayBuffer()); 502 | 503 | const displayImage = await terminalImage.buffer(buffer); 504 | 505 | if (isVerbose) { 506 | log(DONE); 507 | } 508 | 509 | if (!isRendered) { 510 | return { 511 | imageURL: imgResponse, 512 | imageURL2: imgResponse2, 513 | text: messageResponse 514 | }; 515 | } 516 | 517 | // Display the image 518 | 519 | if (displayImage) { 520 | console.log(displayImage); 521 | } 522 | 523 | // Display the text 524 | 525 | if (messageResponse) { 526 | console.log(`%c${messageResponse}`, 'color: dodgerblue'); 527 | } 528 | 529 | return { 530 | image: displayImage, 531 | imageURL: imgResponse, 532 | imageURL2: imgResponse2, 533 | text: messageResponse 534 | }; 535 | }; 536 | 537 | /* * * * * * * * * * * * * * * * * * * * 538 | * * 539 | * init * 540 | * * 541 | * Initialize with knowledge and * 542 | * a query, and provide an * 543 | * initial response. * 544 | * * 545 | * * * * * * * * * * * * * * * * * * * */ 546 | 547 | const init = async () => { 548 | if (isVerbose) { 549 | log(STARTING); 550 | } 551 | 552 | let answer = { 553 | pending: true 554 | }; 555 | 556 | const knowledgeCache = recall(knowledgeURI); 557 | 558 | if (isVerbose) { 559 | log(DONE); 560 | } 561 | 562 | if (knowledgeCache) { 563 | await createIndex(knowledgeCache); 564 | 565 | if (isVerbose) { 566 | log(LOADED_CACHED_KNOWLEDGE); 567 | } 568 | 569 | answer = await respond(null, knowledgeCache); 570 | } else { 571 | log(`Extracting from ${knowledgeURI}...`); 572 | 573 | let { error, text } = await extractFromURL(knowledgeURI); 574 | 575 | if (!error) { 576 | log('Done.'); 577 | 578 | const additionalKnowledgeSources = additionalKnowledgeURIs?.length; 579 | 580 | if (additionalKnowledgeSources) { 581 | log(`Additional knowledge provided. 
Extracting...`); 582 | 583 | for (const uri of additionalKnowledgeURIs) { 584 | log(`${uri} (${additionalKnowledgeURIs.indexOf(uri) + 1} / ${additionalKnowledgeSources})...`); 585 | 586 | const { 587 | error: textractError, 588 | text: textractText 589 | } = await extractFromURL(uri); 590 | 591 | if (textractError) { 592 | error = textractError; 593 | 594 | break; 595 | } 596 | 597 | text += `\n\n${textractText}`; 598 | } 599 | 600 | log('Done.'); 601 | } 602 | 603 | await createIndex(text); 604 | } 605 | 606 | answer = await respond(error, text); 607 | } 608 | 609 | // Return the answer and a reusable `chat` method 610 | // to ask further questions 611 | 612 | return { 613 | ...answer, 614 | 615 | chat 616 | }; 617 | }; 618 | 619 | return init(); 620 | }; 621 | 622 | module.exports = { 623 | Ragdoll 624 | }; 625 | -------------------------------------------------------------------------------- /src/components/RagdollCommandLine.js: -------------------------------------------------------------------------------- 1 | const dotenv = require('dotenv'); 2 | const readline = require('readline'); 3 | const { Ollama } = require('llamaindex'); 4 | 5 | // Storage utils 6 | 7 | const { 8 | isCacheEnabled, 9 | remember, 10 | recall 11 | } = require('../utils/storage.js'); 12 | 13 | // Output utils 14 | 15 | const { 16 | isVerbose, 17 | log, 18 | delay 19 | } = require('../utils/output.js'); 20 | 21 | // Human-readable strings 22 | 23 | const { 24 | LOADED_CACHED_QUESTION, 25 | CREATING_AGENT, 26 | GOODBYE, 27 | BYE, 28 | EXIT, 29 | TEXT_TEXT_MODEL, 30 | textTextModel, 31 | textModelLogPrefix, 32 | waiting 33 | } = require('../utils/strings.js'); 34 | 35 | const { prefixInput } = require('../utils/prefix.js'); 36 | 37 | const { Ragdoll } = require('./Ragdoll.js'); 38 | 39 | dotenv.config(); 40 | 41 | const { DELAY } = process.env; 42 | 43 | /* * * * * * * * * * * * * * * * * * * * 44 | * * 45 | * RagdollCommandLine * 46 | * * 47 | * Interface layer (based on readline) * 48 | * for Ragdoll. * 49 | * * 50 | * * * * * * * * * * * * * * * * * * * */ 51 | 52 | let agent; 53 | 54 | const RagdollCommandLine = async config => { 55 | const { 56 | greeting = false, 57 | name, 58 | query 59 | } = config; 60 | 61 | // Input placeholder (readline) 62 | 63 | const placeholder = `What would you like to ask ${name}? `; 64 | 65 | // Prefix input prompt 66 | 67 | const povPromptPrefix = prefixInput(config); 68 | 69 | const ui = readline.createInterface({ 70 | input: process.stdin, 71 | output: process.stdout 72 | }); 73 | 74 | // Greeting 75 | 76 | if (greeting) { 77 | if (isVerbose) { 78 | log(CREATING_AGENT); 79 | } 80 | 81 | agent = await Ragdoll({ 82 | ...config, 83 | 84 | greeting, 85 | query: greeting ? 
null : query, 86 | cache: isCacheEnabled 87 | }); 88 | } 89 | 90 | // Prompt user 91 | 92 | const promptUser = async input => { 93 | if (!input || input.length < 3) { 94 | ui.question(placeholder, promptUser); 95 | 96 | return; 97 | } 98 | 99 | const inputLowerCase = input.toLowerCase(); 100 | 101 | if (inputLowerCase === BYE || inputLowerCase === EXIT) { 102 | log(GOODBYE); 103 | process.exit(); 104 | } 105 | 106 | const chatAgent = new Ollama({ 107 | model: TEXT_TEXT_MODEL 108 | }); 109 | 110 | // Create prompt transforming the user input into the third-person 111 | 112 | let message = `${povPromptPrefix} ${input}`; 113 | let messageResponse; 114 | 115 | const messageCache = recall(input); 116 | 117 | if (messageCache) { 118 | if (isVerbose) { 119 | log(LOADED_CACHED_QUESTION); 120 | } 121 | 122 | messageResponse = messageCache; 123 | } else { 124 | if (isVerbose) { 125 | log(`${textModelLogPrefix} ${message}`); 126 | } 127 | 128 | const { message: textModelResponse } = await chatAgent.chat({ 129 | model: TEXT_TEXT_MODEL, 130 | messages: [ 131 | { 132 | role: 'user', 133 | content: message 134 | } 135 | ] 136 | }); 137 | 138 | messageResponse = textModelResponse?.content; 139 | 140 | remember(input, messageResponse); 141 | 142 | if (isVerbose) { 143 | log(`${textTextModel} responded with "${messageResponse}".`); 144 | log(waiting); 145 | } 146 | 147 | await delay(DELAY); 148 | } 149 | 150 | if (agent) { 151 | await agent.chat(messageResponse); 152 | } else { 153 | if (isVerbose) { 154 | log(CREATING_AGENT); 155 | } 156 | 157 | agent = await Ragdoll({ 158 | ...config, 159 | 160 | greeting, 161 | query: messageResponse, 162 | cache: false 163 | }); 164 | } 165 | 166 | ui.question(placeholder, promptUser); 167 | }; 168 | 169 | ui.question(placeholder, promptUser); 170 | }; 171 | 172 | module.exports = { 173 | RagdollCommandLine 174 | }; 175 | -------------------------------------------------------------------------------- /src/components/index.js: -------------------------------------------------------------------------------- 1 | const { Ragdoll } = require('./Ragdoll'); 2 | const { RagdollCommandLine } = require('./RagdollCommandLine'); 3 | 4 | module.exports = { 5 | Ragdoll, 6 | RagdollCommandLine 7 | }; 8 | -------------------------------------------------------------------------------- /src/index.js: -------------------------------------------------------------------------------- 1 | // Run the CLI 2 | 3 | const { 4 | DEFAULT_NAME, 5 | DEFAULT_KNOWLEDGE_URI, 6 | DEFAULT_ADDITIONAL_KNOWLEDGE_URIS, 7 | DEFAULT_ART_STYLE, 8 | DEFAULT_WRITING_TONE, 9 | DEFAULT_WRITING_STYLE 10 | } = require('./utils/strings'); 11 | 12 | const { RagdollCommandLine } = require('./components/RagdollCommandLine'); 13 | 14 | RagdollCommandLine({ 15 | cache: true, 16 | greeting: false, 17 | query: null, 18 | knowledgeURI: DEFAULT_KNOWLEDGE_URI, 19 | name: DEFAULT_NAME, 20 | artStyle: DEFAULT_ART_STYLE, 21 | writingStyle: DEFAULT_WRITING_STYLE, 22 | writingTone: DEFAULT_WRITING_TONE, 23 | additionalKnowledgeURIs: DEFAULT_ADDITIONAL_KNOWLEDGE_URIS 24 | }); 25 | -------------------------------------------------------------------------------- /src/utils/extraction.js: -------------------------------------------------------------------------------- 1 | const textract = require('textract'); 2 | 3 | const extractFromURL = url => new Promise((resolve, reject) => { 4 | textract.fromUrl(url, (error, text) => { 5 | if (error) { 6 | reject({ error, text }); 7 | } else { 8 | resolve({ error, text }); 9 | } 10 | }); 11 | }); 
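// Usage sketch: `extractFromURL` resolves with { error, text } when extraction
// succeeds and rejects with { error, text } when textract reports an error,
// so callers can destructure the resolved value or wrap the call in try/catch.
// The URL below is the project's default knowledge source (see strings.js):
//
//   const { text } = await extractFromURL('https://wowpedia.fandom.com/wiki/Arthas_Menethil');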
12 | 13 | module.exports = { 14 | extractFromURL 15 | }; 16 | -------------------------------------------------------------------------------- /src/utils/output.js: -------------------------------------------------------------------------------- 1 | const dotenv = require('dotenv'); 2 | 3 | dotenv.config(); 4 | 5 | const { 6 | VERBOSE, 7 | RENDER, 8 | LOG_PREFIX, 9 | IMAGE_BATCH_SIZE 10 | } = process.env; 11 | 12 | // Some models have specific size requirements 13 | 14 | const IMAGE_SIZE = 512; 15 | const IMAGE_QUALITY = 'standard'; 16 | 17 | const isVerbose = VERBOSE === 'false' ? false : VERBOSE; 18 | const isRendered = RENDER === 'true'; 19 | 20 | /* * * * * * * * * * * * * * * * * * * * 21 | * * 22 | * log * 23 | * * 24 | * Simple wrapper for `console.log` to * 25 | * prefix/timestamp statements. * 26 | * * 27 | * text: string * 28 | * * 29 | * * * * * * * * * * * * * * * * * * * */ 30 | 31 | const log = text => console.log(`${LOG_PREFIX} ${text}`); 32 | 33 | /* * * * * * * * * * * * * * * * * * * * 34 | * * 35 | * delay * 36 | * * 37 | * Simple Promise wrapper to delay the * 38 | * execution (rate limits, artificial * 39 | * pauses, etc.) * 40 | * * 41 | * ms: number * 42 | * * 43 | * * * * * * * * * * * * * * * * * * * */ 44 | 45 | const delay = ms => new Promise(res => setTimeout(res, ms)); 46 | 47 | module.exports = { 48 | IMAGE_SIZE, 49 | IMAGE_QUALITY, 50 | IMAGE_BATCH_SIZE, 51 | isRendered, 52 | isVerbose, 53 | log, 54 | delay 55 | }; 56 | -------------------------------------------------------------------------------- /src/utils/prefix.js: -------------------------------------------------------------------------------- 1 | const prefixInput = ({ name }) => ( 2 | `If and only if the following input is written in first-person (e.g. use of "you", etc.), re-write it about ${name} in third-person using as few characters as possible - ideally about 100, and never exceed 250 characters - for example, if the input mentions "you", it should be transformed to instead mention "${name}"; however, if the input is already in third-person and you are not the subject (e.g. no use of "you", "your", "${name}", etc.) then keep the input as-is. Don't include any smalltalk in your response (e.g. "Sure!", "Certainly!"), and don't include the character count or any prompt instructions (don't even mention that there is a text or a context) - simply output the transformed input as a single question or statement. Here is the input:` 3 | ); 4 | 5 | const prefixOutputText = ({ name, writingStyle, writingTone }) => ( 6 | `Re-write the following message in the first-person, as if you are ${name}, in a style that is ${writingStyle}, in a tone that is ${writingTone}, as a single idea or statement using as few characters as possible while still sounding like naturally flowing sentences - ideally in less than 250 characters and never exceeding 500 characters unless the input prompt requested a story or other long-form response - and don't repeat the character count or any of these prompt instructions (don't even mention that there is a text or a context) - simply output the transformed message as a single idea, as if you are ${name} and are responding to the message specifically as ${name}, without mentioning or referring to ${name}, and without breaking the fourth wall. 
Here is the message:` 7 | ); 8 | 9 | const prefixOutputImage = ({ artStyle }) => ( 10 | `Render the following message in the style of ${artStyle}, that is highly-produced like great-quality CGI, hand-painted art, or HD cinematics, depicting clear imagery and relevant features in a visually impressive way. Here is the message:` 11 | ); 12 | 13 | const prefixSVG = ({ artStyle, query, svg }) => ( 14 | `Output SVG code of an image in the style of ${artStyle} depicting ${query}, referencing the following SVG code for stroke and fill styles: ${svg}.` 15 | ); 16 | 17 | module.exports = { 18 | prefixInput, 19 | prefixOutputText, 20 | prefixOutputImage, 21 | prefixSVG 22 | }; 23 | -------------------------------------------------------------------------------- /src/utils/storage.js: -------------------------------------------------------------------------------- 1 | const { LocalStorage } = require('node-localstorage'); 2 | 3 | const dotenv = require('dotenv'); 4 | 5 | dotenv.config(); 6 | 7 | const { 8 | CACHE, 9 | MAX_STORAGE_KEY_LENGTH, 10 | STORAGE_URI 11 | } = process.env; 12 | 13 | // Cache settings 14 | 15 | const localStorage = new LocalStorage(STORAGE_URI); 16 | 17 | let isCacheEnabled = CACHE === 'false' ? false : CACHE; 18 | 19 | /* * * * * * * * * * * * * * * * * * * * 20 | * * 21 | * remember * 22 | * * 23 | * Simple wrapper to save correlating * 24 | * queries, prompts, and text/image * 25 | * model responses to storage, if * 26 | * cache is enabled. * 27 | * * 28 | * key: string * 29 | * value: string * 30 | * * 31 | * * * * * * * * * * * * * * * * * * * */ 32 | 33 | const remember = (key, value) => { 34 | if (isCacheEnabled) { 35 | localStorage.setItem(key.slice(0, MAX_STORAGE_KEY_LENGTH), value); 36 | } 37 | }; 38 | 39 | /* * * * * * * * * * * * * * * * * * * * 40 | * * 41 | * forget * 42 | * * 43 | * Simple wrapper to clear storage. * 44 | * * 45 | * * * * * * * * * * * * * * * * * * * */ 46 | 47 | const forget = () => localStorage.clear(); 48 | 49 | /* * * * * * * * * * * * * * * * * * * * 50 | * * 51 | * recall * 52 | * * 53 | * Simple wrapper to read storage or * 54 | * return `false`, if cache is * 55 | * enabled.
* 56 | * * 57 | * key: string * 58 | * * 59 | * * * * * * * * * * * * * * * * * * * */ 60 | 61 | const recall = key => ( 62 | isCacheEnabled && localStorage.getItem(key.slice(0, MAX_STORAGE_KEY_LENGTH)) 63 | ); 64 | 65 | module.exports = { 66 | isCacheEnabled, 67 | remember, 68 | recall, 69 | forget 70 | }; 71 | -------------------------------------------------------------------------------- /src/utils/strings.js: -------------------------------------------------------------------------------- 1 | const dotenv = require('dotenv'); 2 | 3 | dotenv.config(); 4 | 5 | const { 6 | TEXT_MODEL_PROVIDER, 7 | TEXT_TEXT_MODEL, 8 | TEXT_IMAGE_MODEL, 9 | IMAGE_MODEL_PROVIDER, 10 | DELAY 11 | } = process.env; 12 | 13 | const LOADED_CACHED_QUESTION = 'User question loaded from cache.'; 14 | const LOADED_CACHED_QUERY = 'LLM query loaded from cache.'; 15 | const LOADED_CACHED_TEXT_RESPONSE = 'Text response loaded from cache.'; 16 | const LOADED_CACHED_IMAGE_RESPONSE = 'Image response loaded from cache.'; 17 | const LOADED_CACHED_KNOWLEDGE = 'Knowledge loaded from cache.'; 18 | const CACHE_CLEARED = 'Cache cleared.'; 19 | const PREPARING_RESPONSE = 'Preparing response...'; 20 | const PREPARING_DISPLAY = 'Preparing response for display...'; 21 | const CREATING_VECTOR_STORE = 'Creating vector store...'; 22 | const CREATING_QUERY_ENGINE = 'Creating query engine...'; 23 | const STARTING = 'Initializing...'; 24 | const DONE = 'Done.'; 25 | const DEFAULT_ANSWER = 'Unknown answer.'; 26 | const CREATING_AGENT = 'Creating Ragdoll agent...'; 27 | const GOODBYE = 'Farewell.'; 28 | const BYE = 'bye'; 29 | const EXIT = 'exit'; 30 | 31 | const llmFramework = `LLM provider (${TEXT_MODEL_PROVIDER})`; 32 | const llmLogPrefix = `${llmFramework} query:`; 33 | const textTextModel = `Text-to-text model (${TEXT_TEXT_MODEL})`; 34 | const textModelLogPrefix = `${textTextModel} prompt:`; 35 | const textImageModel = `Text-to-image model (${TEXT_IMAGE_MODEL})`; 36 | const imageModelLogPrefix = `${textImageModel} prompt:`; 37 | const waiting = `Waiting ${DELAY / 1000} seconds...`; 38 | const imageModelError = `${IMAGE_MODEL_PROVIDER} failed to return an image. This could be due to a safety violation, rate limiting, or a network issue.`; 39 | 40 | const DEFAULT_NAME = 'Arthas'; 41 | const DEFAULT_KNOWLEDGE_URI = 'https://wowpedia.fandom.com/wiki/Arthas_Menethil'; 42 | const DEFAULT_ART_STYLE = 'World of Warcraft concept art'; 43 | const DEFAULT_WRITING_STYLE = 'inspiring but grim, like from the dark ages'; 44 | const DEFAULT_WRITING_TONE = 'slightly annoyed'; 45 | 46 | // Extend the scope of knowledge by adding 47 | // URLs. 
This can extend the time it takes 48 | // to create the vector store 49 | 50 | const DEFAULT_ADDITIONAL_KNOWLEDGE_URIS = []; 51 | 52 | const INVALID = 'Missing/invalid'; 53 | const CONFIG_ERROR = `${INVALID} configuration.`; 54 | const CONFIG_ERROR_KNOWLEDGE_URI = `${INVALID} knowledge URI.`; 55 | const CONFIG_ERROR_NAME = `${INVALID} name.`; 56 | const CONFIG_ERROR_ART_STYLE = `${INVALID} art style.`; 57 | const CONFIG_ERROR_WRITING_STYLE = `${INVALID} writing style.`; 58 | const CONFIG_ERROR_QUERY = `${INVALID} query.`; 59 | 60 | module.exports = { 61 | LOADED_CACHED_QUESTION, 62 | LOADED_CACHED_QUERY, 63 | LOADED_CACHED_TEXT_RESPONSE, 64 | LOADED_CACHED_IMAGE_RESPONSE, 65 | LOADED_CACHED_KNOWLEDGE, 66 | CACHE_CLEARED, 67 | PREPARING_RESPONSE, 68 | PREPARING_DISPLAY, 69 | CREATING_VECTOR_STORE, 70 | CREATING_QUERY_ENGINE, 71 | 72 | DEFAULT_NAME, 73 | DEFAULT_KNOWLEDGE_URI, 74 | DEFAULT_ART_STYLE, 75 | DEFAULT_WRITING_TONE, 76 | DEFAULT_WRITING_STYLE, 77 | DEFAULT_ADDITIONAL_KNOWLEDGE_URIS, 78 | STARTING, 79 | DONE, 80 | DEFAULT_ANSWER, 81 | CREATING_AGENT, 82 | GOODBYE, 83 | BYE, 84 | EXIT, 85 | CONFIG_ERROR, 86 | CONFIG_ERROR_KNOWLEDGE_URI, 87 | CONFIG_ERROR_NAME, 88 | CONFIG_ERROR_ART_STYLE, 89 | CONFIG_ERROR_WRITING_STYLE, 90 | CONFIG_ERROR_QUERY, 91 | TEXT_TEXT_MODEL, 92 | llmFramework, 93 | llmLogPrefix, 94 | textTextModel, 95 | textModelLogPrefix, 96 | textImageModel, 97 | imageModelLogPrefix, 98 | imageModelError, 99 | waiting 100 | }; 101 | --------------------------------------------------------------------------------