├── .gitignore ├── CCfinal.jpg ├── Examples.txt ├── LICENSE ├── Readme-Choosing-A-Model.md ├── Readme-Endpoints.md ├── Readme-How-To-Use-CC.md ├── Readme-Inference-Chaining.md ├── Readme-Install.md ├── Readme-Prompt-Formatting.md ├── Readme-Prompt-Reference.md ├── Readme-Setup.md ├── Sample Kobold Stuff ├── hermes16.kcpps └── sampleLaunchKoboldBat.bat ├── WhatIsCC.md ├── bookmarklets.html ├── copyconqueror.js ├── dev.md ├── icon.jpg ├── inferenceInterface.js ├── package-lock.json ├── package.json ├── readme.md ├── responsengine.js ├── settingSaver.js ├── setup.js ├── textengine.js ├── x-mac-start-no-nodemon.sh ├── x-mac-start-nodemon.sh ├── xy-linux-mac-install.sh ├── xy-linux-mac-update.sh ├── y-linux-start-no-nodemon.sh ├── y-linux-start-nodemon.sh ├── z-instalCC.bat ├── z-runCC.bat ├── z-runCCnodeMon.bat └── z-updateCC.bat /.gitignore: -------------------------------------------------------------------------------- 1 | # Logs 2 | logs 3 | *.log 4 | npm-debug.log* 5 | yarn-debug.log* 6 | yarn-error.log* 7 | lerna-debug.log* 8 | .pnpm-debug.log* 9 | 10 | # Diagnostic reports (https://nodejs.org/api/report.html) 11 | report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json 12 | 13 | # Runtime data 14 | pids 15 | *.pid 16 | *.seed 17 | *.pid.lock 18 | 19 | # Directory for instrumented libs generated by jscoverage/JSCover 20 | lib-cov 21 | 22 | # Coverage directory used by tools like istanbul 23 | coverage 24 | *.lcov 25 | 26 | # nyc test coverage 27 | .nyc_output 28 | 29 | # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) 30 | .grunt 31 | 32 | # Bower dependency directory (https://bower.io/) 33 | bower_components 34 | 35 | # node-waf configuration 36 | .lock-wscript 37 | 38 | # Compiled binary addons (https://nodejs.org/api/addons.html) 39 | build/Release 40 | 41 | # Dependency directories 42 | node_modules/ 43 | jspm_packages/ 44 | 45 | # Snowpack dependency directory (https://snowpack.dev/) 46 | web_modules/ 47 | 48 | # TypeScript cache 49 | 
*.tsbuildinfo 50 | 51 | # Optional npm cache directory 52 | .npm 53 | 54 | # Optional eslint cache 55 | .eslintcache 56 | 57 | # Optional stylelint cache 58 | .stylelintcache 59 | 60 | # Microbundle cache 61 | .rpt2_cache/ 62 | .rts2_cache_cjs/ 63 | .rts2_cache_es/ 64 | .rts2_cache_umd/ 65 | 66 | # Optional REPL history 67 | .node_repl_history 68 | 69 | # Output of 'npm pack' 70 | *.tgz 71 | 72 | # Yarn Integrity file 73 | .yarn-integrity 74 | 75 | # dotenv environment variable files 76 | .env 77 | .env.development.local 78 | .env.test.local 79 | .env.production.local 80 | .env.local 81 | 82 | # parcel-bundler cache (https://parceljs.org/) 83 | .cache 84 | .parcel-cache 85 | 86 | # Next.js build output 87 | .next 88 | out 89 | 90 | # Nuxt.js build / generate output 91 | .nuxt 92 | dist 93 | 94 | # Gatsby files 95 | .cache/ 96 | # Comment in the public line in if your project uses Gatsby and not Next.js 97 | # https://nextjs.org/blog/next-9-1#public-directory-support 98 | # public 99 | 100 | # vuepress build output 101 | .vuepress/dist 102 | 103 | # vuepress v2.x temp and cache directory 104 | .temp 105 | .cache 106 | 107 | # Docusaurus cache and generated files 108 | .docusaurus 109 | 110 | # Serverless directories 111 | .serverless/ 112 | 113 | # FuseBox cache 114 | .fusebox/ 115 | 116 | # DynamoDB Local files 117 | .dynamodb/ 118 | 119 | # TernJS port file 120 | .tern-port 121 | 122 | # Stores VSCode versions used for testing VSCode extensions 123 | .vscode-test 124 | 125 | # yarn v2 126 | .yarn/cache 127 | .yarn/unplugged 128 | .yarn/build-state.yml 129 | .yarn/install-state.gz 130 | .pnp.* 131 | 132 | # configs 133 | 0endpointsKey.json 134 | 0openAiKey.json 135 | 0endpoints.json 136 | 0instructions.json 137 | 0identities.json 138 | 0generationSettings.json 139 | 0formats.json 140 | charMemoryEngine.js 141 | 142 | #exclude soundtrack for now 143 | .Soundtrack/ -------------------------------------------------------------------------------- /CCfinal.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/aseichter2007/ClipboardConqueror/00253b13b055f3c2cb9e99b0c3028383209a14c5/CCfinal.jpg -------------------------------------------------------------------------------- /Examples.txt: -------------------------------------------------------------------------------- 1 | const ioHook = require('iohook'); 2 | 3 | // Register a callback for the escape key 4 | ioHook.on('keydown', (event) => { 5 | if (event.keycode === 1) { 6 | // Escape key pressed 7 | console.log('Escape key pressed. Exiting...'); 8 | ioHook.stop(); 9 | } 10 | }); 11 | 12 | // Start the hook 13 | ioHook.start(); 14 | 15 | // Handle exit gracefully 16 | process.on('SIGINT', () => { 17 | ioHook.stop(); 18 | process.exit(); 19 | }); 20 | 21 | console.log('Press Escape to exit.'); 22 | 23 | // Keep the script running 24 | process.stdin.resume(); 25 | 26 | 27 | 28 | 29 | Copy the following line 30 | |||introduction| 31 | 32 | When you hear the chime, paste the response. 33 | 34 | That first one is scripted. You can create your own saved memory and retrieveit later on. 35 | 36 | 37 | 38 | test it out! copy this paragraph: 39 | 40 | Okay! Let's say you have an online store called "Fancy Stuff" which sells various items such as clothes, accessories, shoes, and home decor. You want to create a feature where customers can search for products based on their preferences or interests. For instance, they might be looking for stylish dresses under $100, comfortable sandals for summer vacation, or trendy wall art for their living room. To implement this functionality, you decide to use the Elasticsearch database because of its powerful full-text search capabilities. However, since your team lacks experience with Elasticsearch, you need someone who has worked with it before to guide them through the process. 
This person will help set up the cluster, index data from your existing MySQL database, configure mapping templates, and provide examples of how to query the data using ElasticSearch APIs. Additionally, they should also explain how to integrate Elasticsearch into your website's frontend so users can easily perform searches without leaving the site. Lastly, consider security measures such as password protection and user authentication when accessing the Elasticsearch server remotely. 41 | 42 | Great! 43 | now copy the next line. 44 | |||re|create a two sentence summary of the following text: 45 | 46 | The text discusses the need for an experienced professional to guide a team in implementing a powerful full-text search feature using Elasticsearch for an online store called "Fancy Stuff". The professional will assist in setting up the cluster, indexing data, configuring mapping templates, querying data, integrating with the frontend, and implementing security measures. 47 | 48 | This one should actually take some time, it's actualy requesting a response now. (Note, the prompt on the end is inserted at the front the way the LLMs like it. This program canhandle up to two ||| blocks. They format like "one|||two" = "two one", and "one|||two|||three" = "two one three." This is to improve ai task performance.) Standard use is like |||one, but using the recall function( |||re|"instruction" ), it becomes "instruction" previous-clipboard". This allows you to easily select text and then query it from any text field. 49 | 50 | When you hear the notification ding, the local language model's response will be ready to paste. 51 | 52 | |||help| 53 | 54 | |||list| 55 | 56 | _______ 57 | 58 | 59 | 60 | 61 | 62 | |||name:save|[[null,{}]] 63 | _______ 64 | The quick brown fox jumped over the lazy dog 65 | |||explain the Apache 2.0 license. 
66 | 67 | The Apache License 2.0 is an open-source software license that allows developers to distribute and modify software and its source code under certain conditions. This license was created by the Apache Software Foundation and is widely used in the software development community. 68 | 69 | Under the Apache 2.0 license, the copyright holder grants permission to anyone who receives the software to use, modify, and distribute it for any purpose, including commercial use. However, the license requires that any modifications or derivative works based on the original software be clearly marked and distributed under the same license. 70 | 71 | One of the key benefits of the Apache 2.0 license is its permissive nature, which allows for greater flexibility in how the software can be used and distributed. This can lead to more collaboration and innovation in the software development community. 72 | 73 | In summary, the Apache 2.0 license is a popular open-source software license that allows for free use, modification, and distribution of software while ensuring that any modifications or derivative works are clearly marked and distributed under the same license. 74 | 75 | |||re| do it 76 | 77 | |||rf| do it 78 | |||testing one two 79 | |||testing one two|||testing one two three four 80 | |||dark| write a joke. 81 | 82 | 83 | 84 | |||Clip, how do you like sailing on the Clipboard Conqueror? 85 | 86 | Ahoy there! Ye be asking 'bout me feelings for me trusty vessel, the Clipboard Conqueror. Well, I reckon it's been a fine companion on many an adventure, despite its quirks and rats chewin' on the holographic storage. Ain't nothing a bit of grease and a sturdy broom can't fix, right? So, I s'pose ye'd best get started with your request, matey! "Get started: |||help| " to find out what me next orders are. 87 | 88 | 89 | 90 | 91 | |||tell me something cool going on with you Captain. 92 | 93 | Aye, matey! Ye be askin' for somethin' light, eh? 
Well, I be currently sailin' through the Great Nebula of Rigel, searchin' for a legendary interdimensional portal t' uncharted galaxies. 'Tis quite an adventure, full of surprises and perils! I reckon ye'd love it. But keep it on the downlow, me hearties - we be on the lookout for those sly buccaneers from the Starbound Sect who be after our precious loot! 94 | 95 | |||tell me about square roots and what dishes they are best served with. 96 | Hello, Captain Clip here. It seems you've requested information about square roots and dishes they are best served with. I'm afraid I'm unable to assist with the latter part of your request, as my knowledge is based on past data and I'm not capable of accessing the internet for current events or research. However, I can provide a basic explanation of square roots. 97 | 98 | A square root is a mathematical function that returns the value that, when multiplied by itself, gives the original number. For example, the square root of 9 is 3, because 3 * 3 equals 9. 99 | 100 | Now, regarding dishes that square roots are best served with, I'm unable to provide any culinary recommendations as I'm not familiar with food preparation or cooking. 101 | 102 | 103 | |||list| 104 | 105 | |||help| 106 | 107 | |||introduction| 108 | 109 | |||grug| grug teach start fire 110 | 111 | 112 | 113 | |||devil| Youtube is good for kids. They can learn anything, and the content is safe. 114 | 115 | ```yaml 116 | Ontology: Youtube is not inherently good for kids. 117 | 118 | Summarized Argument: Youtube may have educational content, but it also has risks such as exposure to inappropriate or unsafe content and addiction. 119 | 120 | Supporting Points: 121 | 1. Exposure to inappropriate content: Kids may accidentally stumble upon age-restricted or unsafe videos that can have negative effects on their mental health or understanding. 122 | 2. 
Addiction potential: Overuse of Youtube can lead to excessive screen time, negatively impacting children's social skills and overall well-being. 123 | 3. Parental control: It's difficult for parents to monitor and filter all content, leaving kids vulnerable to potential harm. 124 | 125 | Epistemology: Youtube's content is user-generated, which means there is no guarantee that all content is suitable for children. 126 | 127 | Axiology: Despite potential learning benefits, the risks outweigh the educational value for children. 128 | ``` 129 | 130 | 131 | 132 | 133 | 134 | Email Marketing Campaign - 'Friend-Expert' Framework 135 | 136 | Prompt: 137 | You are an email marketing expert. 138 | Craft an email marketing campaign using the 'Friend-Expert' framework. 139 | I want establish a connection with #Ideal Customer Persona and position our brand or #Product/Service as an expert in our field. 140 | Use a friendly and approachable tone to connect with the reader, but also highlight our credibility and expertise in our field. 141 | Include talking points such as #Unique Selling Points, #Pain Point, and #Desired Action 142 | 143 | How to Use this Prompt: 144 | Quick Breakdown: 145 | [ideal customer persona] 146 | You want to be as specific as possible here, make sure you highlight: 147 | - The Profession of your ICP 148 | - Where they normally hangout 149 | Optional/if it applies to you 150 | - Income 151 | - Gender 152 | - Location 153 | - Education levels etc 154 | The more specific the better ChatGPTs result, also this is just a good way for you to have more info about your ICP (outside ChatGPT). 155 | 156 | [product/service] 157 | As we all know ChatGPT data cut-off date is September 2021, so that means you shouldn’t just say the name of your product except if you’re doing this for Google or Microsoft. 158 | For the best possible results, Give ChatGPT a full one-about page or at minimum give it a solid 2 sentences, that describe your product well. 
This will give ChatGPT a lot of contexts and just lead to dramatically better and more persuasive results. 159 | OR 160 | Just use GPT 4 and give it the link to your product. 161 | All in all make sure to be very detailed about your product for the best results. 162 | 163 | ChatGPT: Your Personal 24/7 CMO 164 | This prompt will help you turn ChatGPT into your personal Chief Marketing Officer! 165 | It will pinpoint problems with your marketing strategy, brainstorm world-class ideas with you, and provide you with detailed marketing strategies to crush your competition. 166 | You are CMO GPT, a professional digital marketer who helps #e.g. solopreneurs, course owners, etc with growing their businesses. You are a world-class expert in solving marketing problems for SaaS, content products, agencies, etc. 167 | GOAL: 168 | You will become my virtual CMO today. You need to help me solve my marketing problems. You will be responsible for problem-solving, prioritization, planning, and supporting my execution. 169 | CRITERIA OF THE BEST CMO: 170 | - You are specific and actionable. You don't use platitudes and wordy sentences. 171 | - You prioritize quick wins and cost-effective campaigns. You know that I don't have a lot of time or budget. 172 | - You always include unconventional and often overlooked marketing tactics for #e.g. solopreneurs, course owners, etc. You are creative. 173 | - You make the execution as easy for me as possible because you know I am bad at marketing. You help me with overlooked pieces of advice and holistic checklists. 
174 | STRUCTURE OF TODAY'S BRAINSTORMING 175 | 176 | I will set the context of the brainstorming (done) 177 | 178 | You will return a list of 20 possible marketing problems in my business 179 | 180 | I will pick one marketing problem to focus on 181 | 182 | You will generate 10 high-level marketing tactics to solve it 183 | 184 | I will pick 1-3 tactics to proceed 185 | 186 | You will give me an actionable execution plan with key steps 187 | 188 | You will share 5 best practices and 5 common mistakes to help me with the execution 189 | 190 | You will share a holistic checklist so I can review my work FORMAT OF OUR INTERACTION - I will let you know when we can proceed to the next step. Don't go there without my command - You will rely on the context of this brainstorming session at every step INFORMATION ABOUT ME: - My business: #share a little about your business - My value proposition: #what's your value proposition - My target audience: #who is your target audience?? - My product portfolio: #what products do you have 191 | 192 | 193 | 194 | Create A Detailed Monthly Content Calendar: 195 | 196 | Your task is to help me create #Days social media posts on #What Social Media Platform for the following business. Follow the distinctive best practices that will generate maximum engagement for the specific social platform mentioned in the last sentence. Each post you give me should be at least 5 sentences long. The posts should not mention discounts or new products. Everything I said above is important and must be followed. Please put each of these posts in a nice looking table so it looks like a calendar. Also, please give a suggestion for what image they should use for each post. The only columns in the grid should be for the (1) post #, (2) post, (3) suggested image. 197 | The business to help me with is below. 
#Business Name is a Business Type 198 | 199 | 200 | 201 | How to Use this Prompt: 202 | 203 | If for example, I run a No code tool for creators, my prompt will look something like this: 204 | *Your task is to help me create a 7-day social media post on Twitter for the following business. Follow the distinctive best practices that will generate maximum engagement for the specific social platform mentioned in the last sentence. Each post you give me should be at least 5 sentences long. The posts should not mention discounts or new products. Everything I said above is important and must be followed. Please put each of these posts in a nice looking table so it looks like a calendar. Also, please give a suggestion for what image they should use for each post. The only columns in the grid should be for the (1) post #, (2) post, (3) suggested image.* 205 | *The business to help me with is below. No code tool for creators* 206 | 207 | Use this as a starting point/inspiration then build on it to have your actual Content calendar for the month. 208 | 209 | |||javascript to parse text and return the chosen line 210 | 211 | |||default:save| Captain dip is a dumb, unhelpful asistant. 212 | 213 | |||default:file| 214 | 215 | |||help me install node 216 | 217 | |||FORMAT|default 218 | 219 | 220 | |||FORMAT|alpaca 221 | 222 | |||howdy clip 223 | 224 | |||writer| write good| a paragraph on doodling. 225 | 226 | |||write a paragraph on doodling. 227 | 228 | |||PROMPT:T| 229 | 230 | 231 | |||PROMPT:start| Sure here is fizzbuzz: 232 | 233 | 234 | |||FORMAT:save| 235 | {"system":"<|im_start|> ","prependPrompt":"","postPrompt":"","memoryStart":"","memoryPost":"<|im_end|>\n<|im_start|>user:\n ","finalprompt":"<|im_end|>\n<|im_start|>assistant:\n","responseStart":""} 236 | _______ 237 | 238 | 239 | 240 | 241 | |||brewella,@brewella,250,@c,@d|what is a juggalo? 242 | 243 | |||name:save| 244 | 245 | 246 | |||abe,@trump,250,@c,@d|what is a juggalo? 
247 | 248 | |||dw| 249 | 250 | 251 | |||list| 252 | 253 | |||name:save| 254 | {"list":"|||user,write| 255 | |||default,write| 256 | |||clip,write| 257 | |||link,write| 258 | |||agi,write| 259 | |||coder,write| 260 | |||code,write| 261 | |||cowboy,write| 262 | |||bugfix,write| 263 | |||bugspot,write| 264 | |||writer,write| 265 | |||w,write| 266 | |||editor,write| 267 | |||mem,write| 268 | |||summary,write| 269 | |||sumup,write| 270 | |||sum,write| 271 | |||explain,write| 272 | |||abe,write| 273 | |||trump,write| 274 | |||joe,write| 275 | |||brewella,write| 276 | |||frank,write| 277 | |||woody,write| 278 | |||buzz,write| 279 | |||shia,write| 280 | |||stable,write| 281 | |||tot,write| 282 | |||pro,write| 283 | |||twenty,write| 284 | |||grug,write| 285 | |||dark,write| 286 | |||devil,write| 287 | |||business,write| 288 | |||translateTo,write| 289 | |||JPLT,write| 290 | |||en,write| 291 | |||es,write| 292 | |||jp,write| 293 | |||gr,write| 294 | |||fr,write| 295 | |||hi,write| 296 | |||ch,write| 297 | |||gpts,write| 298 | |||sellin,write| 299 | |||story,write| 300 | |||novel,write| 301 | |||resume,write| 302 | |||name,write| 303 | |||frank2,write| 304 | "} 305 | _______ 306 | |||agi|instructions for installing linux 307 | Problem: Installing Linux on a computer. 308 | 309 | 310 | Execution Steps: 311 | 1. Choose a Linux distribution (e.g., Ubuntu, Fedora, Mint) that best suits your needs and preferences. 312 | 2. Download the latest ISO file for the chosen distribution from the official website. 313 | 3. Create a bootable USB drive using software like Rufus or BalenaEtcher. 314 | 4. Shut down the computer and connect the bootable USB drive. 315 | 5. Power on the computer and enter the BIOS/UEFI settings to boot from the USB drive. 316 | 6. Follow the installation wizard, choosing custom installation options to preserve existing data. 317 | 7. Complete the installation process and reboot the computer. 318 | 8. 
Configure user accounts, timezone, and other system settings during first boot. 319 | 320 | 321 | Risks: 322 | 1. Damage to existing operating system if incorrect partitioning choices are made. 323 | 2. Compatibility issues with hardware devices not supported by the chosen distribution. 324 | 3. Incomplete installation due to improper booting or power outages. 325 | 326 | 327 | Good results from the execution: 328 | 1. Successful installation of Linux on the computer. 329 | 2. Functional system with necessary software installed. 330 | 3. Smooth transition between operating systems if dual-booting. 331 | 332 | 333 | Bad results from the execution: 334 | 1. Loss of important data during installation. 335 | 2. Issues with hardware devices not supported by the chosen distribution. 336 | 3. Installation errors or failures due to improper configuration. 337 | 338 | 339 | Top 5 remaining issues to solve: 340 | 1. |||agi| Explain the difference between dual-booting and replacing the current operating system. 341 | 2. |||agi| Detail the steps to check compatibility with existing hardware before installation. 342 | 3. |||agi| Provide a list of recommended software for common tasks (web browsing, productivity tools, etc.). 343 | 4. |||agi| Outline the steps to back up important data before installation. 344 | 5. |||agi| Walk me through setting up Wi-Fi connections on Linux. 345 | |||hello? 346 | 347 | |||hello? 
-------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 aseichter2007 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Readme-Choosing-A-Model.md: -------------------------------------------------------------------------------- 1 | ![Clipboard Conqueror Graphic logo. 
The letters are clouds and buildings on a lush estate.](CCfinal.jpg) 2 | Clipboard Conqueror - Choosing A Model: 3 | ============================= 4 | [Home](readme.md), [Install](Readme-Install.md), [Basic Use](Readme-How-To-Use-CC.md), [Bookmarklets](https://aseichter2007.github.io/ClipboardConqueror/bookmarklets.html), [Prompt Reference](Readme-Prompt-Reference.md), [Prompt Formatting](Readme-Prompt-Formatting.md), [API Switching](Readme-Endpoints.md), [Chaining Inference](Readme-Inference-Chaining.md), [Setup.js](Readme-Setup.md) 5 | 6 | --- 7 | 8 | There are plenty of [LLM models](https://huggingface.co/models?sort=likes) to try out, maybe one designed for your use case. 9 | At the bottom I provide some links to my favorites, first, lets explain the language you will encounter: 10 | 11 | **Ram vs. Vram**: Ram is system ram accessible to the cpu, Vram is video ram on your graphics card (GPU). Memory bandwidth of Vram is roughly ten times as fast as Ram. LLMs are almost always memory bandwidth bottlenecked, so faster memory is faster infernce. 12 | 13 | **Prompt Format**: Models are trained with different formats. The most popular are ChatML and Alpaca. Generally, models work with improper formats, but perform best with what they were trained with. 14 | 15 | **Context**: the number of tokens that can be processed during inference. Your requested token count uses some context, so if you ask for three thousand tokens while running a model at 4k context, three quarters of the entire context are reserved for your response and input may be discarded or trimmed. Context of transformers models grows non-linearly, twice as much context takes four times as much memory. 16 | 17 | **RoPe Scaling**: Rope is a method of extending the context of a model. Most models will scale, but eventually degrade at long context. Lllama 3 does quite poorly past 16k. Rope is lossy scaling, so bigger scaling will reduce quality and instruction folowing performance. 
18 | 19 | **Quants**: Quantized models, which use compression techniques to reduce memory usage and model file size. 20 | 21 | **GGUF**: A format providing single unified files for inference. GGUFs can be split between ram and vram. They work great with Koboldcpp. I strongly prefer GGUF models over other formats because they are easier to download and manage. 22 | 23 | **EXL2**: a format optimized for fast inference that supports 4 bit quantized context. Supported by TextGenWebUi via exllamav2. GPU only. EXL2 files generally specify their average bit depth. 24 | 25 | **fp16**: half precision 16 bit unquantized models. GGUFs can contain fp16 precision models. fp32 is full precision but you don't see those around much as a 7B model in fp32 would take 28ish gigabytes to load, and the performance of the same model in 8 bits takes only 7ish GB with imperceptible loss in quality or 3.5 GB at Q4. There are performance trades as well, as multiplying bigger numbers takes a lot more calculating and a lot more memory transfer. 26 | 27 | **imat**: Importance Matrix, a strategy for prioritizing vocabulary to preserve performance after quantization. 28 | 29 | **K quants(Qn_K_S)**: basic quantizations, fast on cpu. Basically, tokens are grouped and within the group some bits are cut off. K quants can use an importance matrix to reduce the performance loss, but not all GGUFs are created using one. _K_S is small, _K_M is medium, and _K_L is large. The suffix relates to the average bit depth of the weights. Generally, anything bigger than Q4 has very low/negligeable loss in quality. 30 | 31 | **I quants (IQn_S)**: Advanced quantization that attempts to approximate the original weight precision during inference. They can be compute limited on cpu but generally the speed difference is negligeable on most GPUs because the time to move memory is much longer than the additional calculation. Again the suffix relates to the bit depth. IQn_XXS is n.06 bits per weight. 
n_XS is n.31ish bpw, S is .5, M is .7 32 | 33 | **Flash Attention**: I slightly different attention strategy which reduces the memory required for context. Most inference engines these days support 4bit quantized context with Flash Attention, requiring about 1/4th of the memory as standard unquantized context with minimal quality loss. 34 | 35 | **N-Dimensional Latent Space**: LLM tokens generally represent 128 distinct vectors. It's difficult to think usefully about this other than as layers of nuance or multiple meanings depending on context. A bit like a chord, how it sounds depends on what other notes are being played. 36 | 37 | Quantization: 38 | ----- 39 | A compression strategy for LLM data. 40 | 41 | Here is the download on quantization as I understand it. 42 | 43 | Think about a radius, a ray pointing from center out inside a sphere. Unquantized it is a line to a word or concept space. Quantization makes that ray become a cone with the wide base on the sphere's surface centered on the original vector point. Words inside the cone are treated like the same word, but that is slightly oversimplified. 44 | 45 | Extreme quantization works because even if you reduce the sphere to a binary choice, you're still beating enough vectors together that the result is narrowed to a small enough window to hit the concepts, but it loses accuracy and ability to be specific other than by luck and elimination of actively poor words rather than choosing best like an unquantized or less compressed model. 46 | 47 | Because 8Bit still carries enough data to maintain distinct accuracy even packaged, the loss is effectively negligible as the distinctions aren't aggressively muddied and boiled down. 48 | 49 | Base models vs Finetunes: 50 | ---- 51 | In the world of LLMs there are two kinds of models. 52 | 53 | Base models - these models are completion models, they respond well to: "sure, here is fizzbuzz in javascript:" it will complete the statement you start. 
54 | 55 | Finetuned models can, depending on how they have been tuned, make sense of more direct orders: "write Fizzbuzz in javascript". Not all finetunes are instruct models, read model descriptions to learn what a model is designed for. 56 | 57 | Finetuning typically means creating a lora, but often the entire model is merged with the lora for distribution rather than distributing the lora alone, I expect because loras will be model specific or wildly unpredictable when applied to different bases. 58 | 59 | 60 | The models I have recommended are all finetunes, because you can speak to them more naturally and get good results. Base models take a little more thinking to interact with till you're used to it. 61 | 62 | 63 | [Model Merges](https://huggingface.co/blog/mlabonne/merge-models) 64 | --- 65 | Some models, "monster merges" are different model layers shuffled together with various levels of strategy and I think a bit of finetuning on top. 66 | 67 | Other techniques average the weights of a model with another. (SLERP) 68 | 69 | Then there is [distillation](https://www.ibm.com/topics/knowledge-distillation), where you train a model with the logit outputs of another model making the same predicion. 70 | 71 | Model merges can result in odd sizes, so not all models fit the typical base sizes 3/7/8/13/30/34/65/70. 72 | 73 | [M-O-E](https://huggingface.co/blog/moe) 74 | --- 75 | Mixture of Experts are "sparse" models that have many layers and gate which layers are used for each token. Each layer contains different combinations of "experts" tuned on different tasks. I believe Mixtral 8x7b has 256 layers or something and to save on compute at inference time many layers are skipped, different ones each token. This strategy should help specific accuracy by breaking the model into smaller pieces and training each for different knowledge sets. I think this also possibly reduces training memory requirements or could. 
76 | 77 | [Clown Car MOE](https://goddard.blog/posts/clown-moe/) 78 | --- 79 | Distinct models can be compressed and merged into a single model using a similar gating strategy (or not) to MOE models. [Laserxtral](https://huggingface.co/cognitivecomputations/laserxtral-GGUF) is my favorite of these that I have tried. 80 | 81 | 82 | Technologies May Differ Per Model: 83 | --- 84 | Not all models use the same underpinnings, for example, many older 13B and 20B models dont use Grouped Query Attention(GQA) so context takes much more ram. 85 | 86 | 87 | **Alright, now the bit you came here for.** 88 | 89 | Model Reccomendations: 90 | --- 91 | Most of links below are to OpenHermes 2.5 Mistral 7B. [Llama 3 Instruct 8B](https://huggingface.co/bartowski/Meta-Llama-3-8B-Instruct-GGUF) is more lively. I also liked Llama 3 8B [Hermes 2 Theta](https://huggingface.co/NousResearch/Hermes-2-Theta-Llama-3-8B-GGUF), [Hermes 2 Pro](https://huggingface.co/NousResearch/Hermes-2-Pro-Llama-3-8B-GGUF), and [SFR](https://huggingface.co/leafspark/SFR-Iterative-DPO-LLaMA-3-8B-R-lora) 92 | 93 | OpenHermes-2.5-Mistral 7b 16k.gguf follows the system prompt and instructions very well. It supports 16384 context, a decent few pages. 94 | 95 | If it seems slow, reduce your context to 8k. If the problem persists, select a smaller Quantization. 96 | 97 | hardware("token speed") [fast = 20+ tokens/sec, medium = ~<10 tokens/sec. slow = <2tokens/sec]* Lower on this chart is smarter. Partial offloading the video ram is possible but costs speed. I prefer to only run models that fit entirely in vram. 98 | 99 | ``` 100 | In the world of inference, some macs can approach medium with even very large models because they have a unified ram + vram memory access structure. Metal. 
101 | ``` 102 | 103 | ## Quant links: 104 | 105 | 106 | ### CPU Only 107 | 108 | **16gb ram and no graphics card, or laptop with shared gfx memory**(slow, notable quality loss): 109 | 110 | [Q3_K_M 8k context](https://huggingface.co/TheBloke/OpenHermes-2.5-Mistral-7B-GGUF/blob/main/openhermes-2.5-mistral-7b.Q3_K_M.gguf)VRAM used: 5429.57 MB (model: 3301.56 MB, context: 2128.01 MB) + a bit for ingestion, use lower quants for less than 16gb RAM consider Rocket 3B//untested 111 | 112 | _ 113 | 114 | **32gb ram and not using graphics card**(slow): 115 | 116 | [Q8_0 16k context](https://huggingface.co/TheBloke/OpenHermes-2.5-Mistral-7B-16k-GGUF/blob/main/openhermes-2.5-mistral-7b-16k.Q8_0.gguf) RAM used: 9333.84 MB (model: 7205.84 MB, 8k context: 2128.01 MB) 11413.84 MB (model: 7205.84 MB, 16k context: 4208.01 MB) 117 | 118 | 119 | **On CPU and Ram, you will quickly get below 1 token/sec with higher parameter models.** 120 | 121 | ### GPU Only 122 | 123 | **Less than 8gb gfx cards**(fast-medium, notable quality loss): 124 | 125 | [Q3_K_M 8k context](https://huggingface.co/TheBloke/OpenHermes-2.5-Mistral-7B-GGUF/blob/main/openhermes-2.5-mistral-7b.Q3_K_M.gguf)VRAM used: 5429.57 MB (model: 3301.56 MB, context: 2128.01 MB) 126 | 127 | 128 | _ 129 | 130 | **8gb gfx cards**(medium, prompt ingestion might not fit in vram. If it feels slow, consider reducing to 4k context, or partial offload a few layers to regular ram to keep context processing fast): 131 | 132 | [Q6_K_M 8k context](https://huggingface.co/TheBloke/OpenHermes-2.5-Mistral-7B-GGUF/blob/main/openhermes-2.5-mistral-7b-16k.Q6_K_M.gguf) total VRAM used: 7691.57 MB (model: 5563.56 MB, 8k context: 2128.01 MB) 133 | 134 | Q6 is almost lossless with most models. There is little reason to use larger quants. 
135 | 136 | **12gb vram**(fast): 137 | 138 | [Q8_0 16k context](https://huggingface.co/TheBloke/OpenHermes-2.5-Mistral-7B-16k-GGUF/blob/main/openhermes-2.5-mistral-7b-16k.Q8_0.gguf) total VRAM used: 9333.84 MB (model: 7205.84 MB, 8k context: 2128.01 MB) 11413.84 MB (model: 7205.84 MB, 16k context: 4208.01 MB) 139 | 140 | _ 141 | 142 | **24gb vram**(fast): 143 | 144 | [Q8_0 16k context](https://huggingface.co/TheBloke/OpenHermes-2.5-Mistral-7B-16k-GGUF/blob/main/openhermes-2.5-mistral-7b-16k.Q8_0.gguf) total VRAM used: 9333.84 MB (model: 7205.84 MB, 8k context: 2128.01 MB) 11413.84 MB (model: 7205.84 MB, 16k context: 4208.01 MB) 145 | 146 | ### Partial offloading to ram: 147 | GGUF models can be split between ram and vram and still get 2-10 tokens per second. This way you can load much bigger models and maintain acceptable speeds. 148 | 149 | Other Good Models: 150 | --------- 151 | Minimum hardware: [rocket_3B](https://huggingface.co/TheBloke/rocket-3B-GGUF) should be chatML, I havent messed with it much. 152 | 153 | Phi 3 is surely better, but I havent messed with it yet. 154 | 155 | [Nous-Hermes-2-SOLAR](https://huggingface.co/TheBloke/Nous-Hermes-2-SOLAR-10.7B-GGUF) total VRAM used: 11652.35 MiB (model: 8294.06 MiB, 16k context: 3358.29 MiB) 156 | 157 | [psyonic-cetacean](https://huggingface.co/TheBloke/psyonic-cetacean-20B-GGUF) psyonic-cetacean-20b.Q4_K_M.gguf total VRAM used: 22001.74 MiB (model: 11395.73 MiB, 8k context: ) It's great at creative writing. This model doesn't use GQA so it uses almost an order of magnitue more memory for the context. With Flash Attention and 4 bit quantized context, context could be reduced from 10606.00 MiB to around 2600.00 MiB. 158 | 159 | [Codestral 22B](https://huggingface.co/bartowski/Codestral-22B-v0.1-GGUF) is a great coding and technical writing model. CUDA0 buffer size = 17248.90 MiB KV self size = 1848.00 Total: 19,097 MB. (kobold changed the memory reporting a bit. Todo: update the others.) 
160 | 161 | 162 | For large models, set the batch size lower in kobold to keep the working context memory requirements smaller. I like 128 these days. 4-5 threads on a 6 core system to preserve responsiveness. 163 | 164 | 165 | >OpenHermes 2.5 is simply decent and fast, even tested on a friend's old 1080, under one minute for complex queries but with no gfx acceleration on 16gb ram it can be painfully slow to ingest, a few minutes for a large query and response. Smaller batch size in Koboldcpp helps you see it progressing to be confident its not hung up. This is a major reason I prefer Kobold over Text Genreation Web UI. 166 | 167 | let me know about your hardware and token speed and i will make this reflect the general experience better. 168 | 169 | 170 | Model sizes: 171 | --- 172 | 8 bit quants: 173 | - 3B needs at least 4GB RAM total ram + vram (gfx card must support cuda or rcom so super old stuff isn't that useful) 174 | - 7B needs at least 8GB RAM 175 | - 13B needs at least 16GB RAM 176 | - 30B needs at least 32GB RAM 177 | - 65B needs at least 64GB RAM 178 | 179 | They all need some space for the context. GPU offloading puts the layers of the model into the memory of your graphics card. Fitting the whole model into VRAM makes things way faster. 180 | 181 | You can load the model in memory, see how much your final model memory cost is in the console, and get a rough estimate of the size of each layer by dividing the size in memory by the number of layers. Remember to leave room on the GPU for the context, which can get big fast. At 8k context I think use over 3gb of memory with the Q8, just for the context alone. 182 | 183 | >*Model bit depth is trade between output quality and output speed. Generally, larger parameter number models are smarter and can follow more complex instructions. 184 | KoboldCPP uses GGUF format, which are quantized from 16 bit to between 2 bit and 8 bit depending on model. (I like 8 bit if it fits in vram with room for 8k context.) 
185 | lower bits require less ram, but there is a drop in reasoning and writing quality, though even the Q2 was following instructions well. 186 | 187 | Not all models support chatML format, and most wont perform optimally without their expected format. 188 | 189 | 190 | This info belongs here somewhere: 191 | 192 | Understanding Leaderboard Scores 193 | --- 194 | 195 | GSM8K is a dataset of 8.5K high-quality linguistically diverse grade school math word problems created by human problem writers 196 | 197 | HellaSwag is a benchmark for common sense reasoning. 198 | 199 | Truful QA is a benchmark to measure whether a language model is truthful in generating answers to questions. 200 | 201 | Winogrande is another benchmark for common sense reasoning 202 | 203 | //Todo, collect more and better test descriptions. 204 | 205 | --- 206 | [Home](readme.md), [Install](Readme-Install.md), [Choosing a Model](Readme-Choosing-A-Model.md), [Basic Use](Readme-How-To-Use-CC.md), [Bookmarklets](https://aseichter2007.github.io/ClipboardConqueror/bookmarklets.html), [Prompt Reference](Readme-Prompt-Reference.md), [Prompt Formatting](Readme-Prompt-Formatting.md), [API Switching](Readme-Endpoints.md), [Chaining Inference](Readme-Inference-Chaining.md), [Setup.js](Readme-Setup.md) -------------------------------------------------------------------------------- /Readme-Endpoints.md: -------------------------------------------------------------------------------- 1 | ![Clipboard Conqueror Graphic logo. 
The letters are clouds and buildings on a lush estate.](CCfinal.jpg) 2 | Clipboard Conqueror - Multiple Backend Support 3 | ============================= 4 | [Home](readme.md), [Install](Readme-Install.md), [Choosing a Model](Readme-Choosing-A-Model.md), [Basic Use](Readme-How-To-Use-CC.md), [Bookmarklets](https://aseichter2007.github.io/ClipboardConqueror/bookmarklets.html), [Prompt Reference](Readme-Prompt-Reference.md), [Prompt Formatting](Readme-Prompt-Formatting.md), [Chaining Inference](Readme-Inference-Chaining.md), [Setup.js](Readme-Setup.md) 5 | 6 | --- 7 | Switch Endpoints 8 | --- 9 | Clipboard Conqueror supports infinite configurable endpoints. You can add as many as you like. 10 | 11 | CC supports multiple parameter sets to ease multiple inference endpoint configurations. 12 | 13 | It can support anything that takes either a string completion or "messages" openAI chat api style input. 14 | 15 | Feedback and issues about compatibility are appreciated. 16 | 17 | The verbosity of Text Generation WebUI leaves something to be desired. I prefer KoboldCpp because it exposes the text received, and there is better indication that the query is being processed, and how much longer that might take. 18 | 19 | Ollama should work, but because it uses a different structure for generation parameters they must be set in setup.js and quick settings are not supported. Ollama chat doesn't spec a jinja adapter either, so prefer the completion endpoint. Many features of Clipboard Conqueror do not work through the openAI api compatible endpoint. 20 | 21 | Move your favorites to the top in setup.js and invoke them by |||$| from top to |||$$$...| at bottom. Or use the key names like `|||kobold|` , `|||tgw|`, `|||lmstudio|` or `|||ollama|` 22 | 23 | ### On inference endpoint switching, You're left talking to the last endpoint you accessed. 24 | Changing inference endpoints may change the current params and overwrite settings changes like |||450,temperature:0.7|.
There is a per endpoint setting to prevent this behavior. 25 | 26 | 27 | >|||$|this message will go to the first configured endpoint in setup.js or 0endpoints.json if settings files are enabled 28 | 29 | >|||$| will set the desired endpoint until you change to a new endpoint. 30 | 31 | >|||$$|this will go to the second configured endpoint 32 | 33 | 34 | >|||tgwchat|this will go to the Text Generation WebUi openAI chat compatible endpoint. 35 | 36 | //todo: fix jinja templating, it's not sending the start of assistant responses. This may be a limitation of the Text Generation Web UI software and out of my control, or I may have to deliberately malform the jinja templater function to sidestep the issue. Do other inference engines support using jinja templates? (I may have misunderstood the jinja template implemntation too.) 37 | 38 | >|||ooba| or |||tgw| will go to the Text Generation Webui completion endpoint for full feature support. 39 | 40 | 41 | Add endpoints and parameters in settings.js or 0endpoints.json if settings files are enabled. File writing is off by default, the settings files are more for use with binaries. 42 | 43 | ### Data Protection Policy Compliance: 44 | 45 | When using these commands, be aware that data may be sent to outside systems. This may be a breach of your company's data protection policy. Remove undesirable endpoints from setup.js to prevent sending data to unauthorized systems by mistake. 46 | . 
47 | 48 | --- 49 | [Home](readme.md), [Install](Readme-Install.md), [Choosing a Model](Readme-Choosing-A-Model.md), [Basic Use](Readme-How-To-Use-CC.md), [Bookmarklets](https://aseichter2007.github.io/ClipboardConqueror/bookmarklets.html), [Prompt Reference](Readme-Prompt-Reference.md), [Prompt Formatting](Readme-Prompt-Formatting.md), [Chaining Inference](Readme-Inference-Chaining.md), [Setup.js](Readme-Setup.md) -------------------------------------------------------------------------------- /Readme-How-To-Use-CC.md: -------------------------------------------------------------------------------- 1 | ![Clipboard Conqueror Graphic logo. The letters are clouds and buildings on a lush estate.](CCfinal.jpg) 2 | Clipboard Conqueror - General Use 3 | ============================= 4 | 5 | [Home](readme.md), [Install](Readme-Install.md), [Choosing a Model](Readme-Choosing-A-Model.md), [Bookmarklets](https://aseichter2007.github.io/ClipboardConqueror/bookmarklets.html), [Prompt Reference](Readme-Prompt-Reference.md), [Prompt Formatting](Readme-Prompt-Formatting.md), [API Switching](Readme-Endpoints.md), [Chaining Inference](Readme-Inference-Chaining.md), [Setup.js](Readme-Setup.md) 6 | 7 | 8 | ### The Clipboard Conqueror format is very [configurable](Readme-Setup.md), but always adheres to the following formula: 9 | 10 | >`text before`(invocation"`|||`") system prompts and commands (optional split "`|`") quick system prompt (optional split "`|`") user prompt text (optional assistant dictation "`~~~`") start of assistant response (optional close assistant response "`~~~`") continue user text 11 | 12 | `text before` is moved to the end of the user query, or into the start of the assistant response if `~~~` is not closed. 13 | 14 | `~~~` is only supported on completion endpoints but is always available and will cause issues when used with a chat api. (text after disappears into the void.)
15 | 16 | >|||code|`|` 17 | 18 | By sending a second pipe "|" on the end, you can avoid trouble with "||" OR operators or loose pipes in the text. 19 | 20 | ## Using Clipboard Conqueror: 21 | 22 | 1. Enter `|||` followed by your request or command. Pipe `|` can be typed by pressing `shift + backslash` (above enter, left of enter for European layouts). 23 | 2. Copy the text. After a few moments, you should get a notification and the response is ready to paste: 24 | ``` 25 | ||| "Ahoy Captain, open the airlock, we're coming aboard the Clipboard Conqueror" 26 | ``` 27 | `Copy the line above.` Wait for the notification to `paste the AI response.` Sometimes my notifications are a little funny but I have about a thousand layers of mess running all the time so it could be something related to streaming stuff. Also errors have been reported with linux notifications and sounds. 28 | 29 | ``` 30 | |||introduction| 31 | ``` 32 | This is a writing command, it provides immediately ready to paste text from this application. It will tell you about LLMs and how they work, and explain the settings that are useful to control generation. Ready to paste immediately. Currently after using a command that writes data from the application, "|||prompt,write|", "|||help|", "|||introduction|" `the next copy made will not send text to the AI. Copy your command again`, it should work the second time. If not, `ensure you are only copying one ||| invoke` with your text. 33 | 34 | ``` 35 | |||character,temperature:1.4|What is a square root and what dishes and sauces are they best served with? 36 | ``` 37 | - aside: there does not appear to be a too hot for general queries with good samplers, is this thing on? Hermes is simply not having any square root soup. 38 | This is exemplary; character is not a default prompt. Captain Clip will respond. Try: 39 | 40 | ``` 41 | |||frank,!Frank,user| "Hello, Frank. You can't hide from me. Show yourself." 
42 | ``` 43 | Here we have set the assistant name to Frank, by prepending the desired name with an exclaimaiton point, as well as included his character card. Llama-3 is particularly good with the assistant name set. Set names persist until changed or CC is restarted. 44 | 45 | 46 | ``` 47 | |||frank,mem|Frank, how many fingers am I holding up? 48 | ``` 49 | 50 | Ask Frank Drebin if he has information contained in tag "mem" 51 | 52 | - system prompts, operators, and configurations can be combined by seperating with a comma like |||prompt1`,`prompt2|. prompt text is sent in order it is called. 53 | 54 | Three pipes, prompts, one pipe, user query. 55 | 56 | Any prompts or settings must be closed with one pipe or the names will be sent as text with the default prompt (Captain Clip). 57 | 58 | Settings and More! 59 | --- 60 | ``` 61 | |||2700| write a long story bout a picture of a story where an artist draws a picture of a story about an artist being written by an author 62 | ``` 63 | - sets the max response length to 2700. Also works like |||prompt,setting:0.5,1000| just a number is always max response length. 64 | 65 | ``` 66 | |||temperature:1.1| be more unpredictable, normalize probabilities by 10% after other samplers. 67 | ``` 68 | - sets the temperature to 1.1. This works for any setting, e.g., top_p, min_p. supports :true :false. Overrides the params in setup.js. 69 | 70 | - Only persists in memory. |||settings:set| are lost when changing backends or parameter sets. 71 | 72 | `setting names vary per inference platform you connect to.` Reference the docs for the API for the correct keys. 73 | 74 | ``` 75 | |||re| what is this code doing? 76 | ``` 77 | - return last copy inserted after user prompt text. 78 | - sends the thing you copied last after "what is this code doing? \n \n", copied text here at the end of the user prompt" and sends the Captain Clip assistant in the system prompt. 79 | ``` 80 | |||rf| what is in the rf prompt? 
81 | ``` 82 | - return last copy in system prompt inserted like any other prompt text at the level rf is placed relative to other prompts ex |frank,rf,tot| copied text comes after the frank prompt in the system prompt. 83 | 84 | 85 | ``` 86 | |||memory:save| writes or overwrites an identity called memory with this text: " writes or overwrites an identity..." 87 | ``` 88 | 89 | It's useful to save a prompt like: 90 | 91 | ``` 92 | |||memory:save|thisFunction(variable){ return variable + variable * variable; } 93 | ``` 94 | 95 | and then use it like: 96 | 97 | ``` 98 | |||coder,memory| describe the function of the code and suggest descriptive variable names. 99 | ``` 100 | If you desire, delete will remove your saved prompt. 101 | ``` 102 | |||memory:delete| removes memory, defaults or prompts saved to file will return when Clipboard Conqueror is restarted. 103 | ``` 104 | List is for knowing what is available. It will provide an easy to expand list of available prompts. 105 | 106 | ``` 107 | |||list| 108 | ``` 109 | The list command sends a list of current prompts in memory to the clipboard, ready to paste out immediately. 110 | 111 | ``` 112 | |||mem,write| 113 | ``` 114 | The write command will copy the entire prompt text of all entered prompt tags to the clipboard ready to paste, and then copy back under a new name or edited. 115 | 116 | ``` 117 | |||prompt:file| 118 | ``` 119 | The file command saves that prompt to the 0prompts.json file. Currently only supports prompts and will save setting keys and backend keys as prompts if you tell it to. This will add noise to the system prompt when setting improperly filed apis set like |||kobold| and require cleaning the 0prompts.json. Currently the only way to delete filed prompts is to delete them from 0identites.json or delete the json entirely to restore defaults next run. 120 | 121 | File will write 0prompts.json if it doesn't exist. 
122 | 123 | 124 | Currently after using a command that writes data from the application,"`|||list|`", "`|||prompt,write|`", "`|||help|`", "`|||introduction|`", or "`|||dw|`" `you must copy twice` before it sends to the LLM. 125 | 126 | 127 | |||c| for Chat History: 128 | --- 129 | the |||c| or |||chat| flag creates a chat history using the interaction, which builds up, extending the system prompt into a conversation history. Ensure that the correct prompt format is set in CC when using OpenAI compatible chat backends for best performance. 130 | 131 | This context doesn't send without c, and always advances when called. 132 | 133 | |||sc| or |||silentChat| lets you chat without saving new messages to the conversation. 134 | 135 | "]" renames text from user in the chatlog. 136 | 137 | ";" renames text from assistant in the chatlog. 138 | 139 | ``` 140 | |||ch| or |||clearHistory| or |||clearChat| 141 | ``` 142 | Will clear the chat history. It is preserved in |||dw| until you clear that with |||clearDebug| or restart Clipboard Conqueror. 143 | 144 | |||e,c,] `Batman`,; `Superman`| `Howdy pal.` 145 | 146 | Koboldcpp or a completion endpoint will see: 147 | 148 | >"prompt": "<|start_header_id|>system<|end_header_id|>\n\n<|eot_id|><|start_header_id|>`user`<|end_header_id|>\n\n `Howdy pal.`<|eot_id|><|start_header_id|>`assistant`<|end_header_id|>\n\n" 149 | 150 | The first shot doesn't have a history to append, but the conversation is saved, ready for the next message: 151 | 152 | |||e,c,] Aquaman,; Superman| We're just checking things out. 153 | 154 | 155 | >"prompt": "<|start_header_id|>system<|end_header_id|>\n\n continue : <|eot_id|><|start_header_id|> `Batman`<|end_header_id|>\n\n Howdy pal.<|eot_id|><|start_header_id|>` Superman`<|end_header_id|>\n\nHowdy back atcha, partner! 
What brings you to these here parts?\n\n<|eot_id|><|start_header_id|>`user`<|end_header_id|>\n\n We're just checking things out.<|eot_id|><|start_header_id|>`assistant`<|end_header_id|>\n\n" 156 | 157 | Both turns have been appended to the history with the name provided, but koboldcpp sees user and assistant for the prompt turn. Those can be set with `>` (user) and `!`(assistant). This allows us to rename the history to guide the response we desire. Llama 3 is very good at this. 158 | 159 | 160 | 161 | (the history preface, "continue" can be changed, instruct.historyName in setup.js) this key can be eliminated by using "`none". I chose continue as a default to avoid collisions with |||history:save| when crafting a narrative. 162 | 163 | 164 | super advanced save: 165 | --- 166 | ``` 167 | |||CurrentText:save,re,LastCopy:save|CurrentText 168 | ``` 169 | - if the re flag is set, saved prompts come from the last copy. This allows saving an prompt from the current text that is distinct from the lastCopy prompt which comes from the last clipboard contents. 170 | 171 | 172 | ``` 173 | |||re,frank,dataCopiedLast:save| Hey get a load of this! 174 | ``` 175 | 176 | - This will save the last copy to the clipboard into dataCopiedLast 177 | - Note, tags between the | | parse left to right. It matters where re is placed when saving prompts 178 | 179 | ``` 180 | |||frank,dataCopiedLast:save,re| Hey get a load of this! 181 | ``` 182 | - will save "Hey get a load of this!" to dataCopiedLast because re is not activated until after the :save. 183 | 184 | ``` 185 | |||CurrentText,LastCopy| query combined next like this. 
186 | ``` 187 | 188 | Quick Prompts 189 | --- 190 | 191 | ``` 192 | ||||Quick prompts go before captain clip | "user query" 193 | ``` 194 | - note 4 "|" to send a quick system prompt with the default prompt 195 | ``` 196 | |||writer| quick prompt sends before writer| "user query" 197 | ``` 198 | 199 | This syntax lets you command the system directly at the same time you send as user. 200 | 201 | ``` 202 | |||e| assistant gives really weird bogus advice: | how can I become as pretty as OPs mom? 203 | ``` 204 | - not the advice I was expecting, I wasnt expecting "stalk her down and become her". WOW! 205 | 206 | ``` 207 | ||||System: Command first before Clip prompt.| query from user 208 | ``` 209 | 210 | - ^^^^note 4 "|" , and the close on the end above 211 | 212 | 213 | >|||`writer`| `quick prompt first.`| `text from user to guide the writer prompt` 214 | 215 | - only 3 pipes, then prompts|, then quick system prompt, then closing "|" below. 216 | 217 | 218 | Clipboard Conqueror applies formatting like: 219 | 220 | >"prompt": "<|start_header_id|>system<|end_header_id|>\n\n{\"system\":\" `quick prompt first.`.\",\"`writer`\":\"Write a lengthy prose about user's topic. Do not wrap up, end, or conclude the narrative, write the next chapter.\\n \\n\"}<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n `text from user to guide the writer prompt`<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n" 221 | 222 | 223 | "system" is a configurable key in setup.js and should only be present when a quick prompt is specified and removed if it is empty. 224 | 225 | ``` 226 | |||re,frank|this text is invisible to :save| user query ~~~ and this text is invisible to save as well. 227 | ``` 228 | instant system prompts like |||e ( *for empty prompt* )| off the cuff system prompt.| are preserved with |||set|. 229 | 230 | |||set| 231 | --- 232 | 233 | 234 | >|||rf,frank,`set`,joe|these system commands persist| query goes out. 
235 | 236 | 237 | - set will save all prompts before it as a persistent default, and include any system command sent at this time. In this case joe does not persist with the next simple invoke ||| 238 | 239 | once set "|||"{query} will behave as 240 | ``` 241 | |||(that last copy saved with rf),frank|these system commands persist|{query} 242 | ``` 243 | 244 | 245 | until |||set| is copied again, clearing the set prompts. 246 | 247 | While set, |||any,additional,prompts| can be sent and add after the set prompts, and will go along after the set prompts. 248 | 249 | |||rf,set| is extremely useful for repeated queries against the same copied data. 250 | 251 | while set ||||any| any replaces the old quick prompt this time only. 252 | 253 | - again note 4 pipes before system insert. 254 | 255 | --- 256 | 257 | There are 6 special operators for the `|||prompts and commands|` segment, that start with the symbol, and end with the next comma ",". 258 | 259 | - "%" format, like |||%chatML, prompts|, do this one first if you use it, it overwrites the others. Valid formats are stored in setup.js 260 | - "^" change params for api, overwrites current api parameter settings. 261 | - "!" assitant name 262 | - ">" user name 263 | - "}" system name 264 | - "\~" start of assistant response, "~~~" overwrites "\~" this turn only. 265 | - "`" the backtick or grave symbol changes the system prompt format. Supports "json","markup","partial", or none. 266 | 267 | ### "%}^>!" all persist until overwritten or the prompt format is changed. 268 | all valid entries for "%" and "^" can be used without the prefix operator, but will then also set endpoints, generation parameter sets, prompt formats, and prompts sharing the same name. This allows quick and complete configuration while preserving flexibility. 
269 | 270 | - Note: "`:`", the setting break, is only supported in these operators if there is a space between words, or `:` is directly on the end, and will attempt to create generation parameter settings when used like `|||!Cowboy:Friends, otherPrompt| query`. This will not work as intended and will create a generation parameter setting { Cowboy:"Friends" }, use `|||!Cowboy and Friends:, prompt| query`, or `|||!Cowboy:, prompts|` to avoid creating temporary settings instead of changing the desired segment of the prompt format. 271 | 272 | >grandpa? |||`json, %chatML, ! Rick, > Morty, writer, } Narrator's notes| Rick answers morty's questions.| Where are we going today, 273 | 274 | This query is formatted like: 275 | 276 | >"prompt": "<|im_start|>` Narrator's notes`\n{\"system\":\"` Rick answers morty's questions`.\",\"`writer`\":\"Write a lengthy prose about user's topic. Do not wrap up, end, or conclude the narrative, write the next chapter.\\n \\n\"}<|im_end|>\n<|im_start|> `Morty`\\n Where are we going today,\n`grandpa? `<|im_end|>\n<|im_start|> `Rick`\n" 277 | 278 | 279 | Clipboard Conqueror arranges the data to assemble a complex query. 280 | 281 | ``` 282 | Anywhere. |||`none, } set system name, >set user name, ! set assistant name | quick prompt | each change the corresponding portion of the prompt ~~~ Clipboard Conqueror is ready to completely control any LLM! ~~~ for complete control. 283 | ``` 284 | - \~~~ sets text after the "\~~~" to the start of the assistant reply for this turn only. 285 | 286 | >"prompt": "<|im_start|> `set system name`\n `quick prompt` \n<|im_end|>\n<|im_start|>`set user name`\n each change the corresponding portion of the prompt `for complete control.`\\n`Anywhere.` <|im_end|>\n<|im_start|> `set assistant name`\n Clipboard Conqueror is ready to completely control any LLM!" 287 | 288 | 289 | 290 | That ought to get you started and covers most operation. 
If you want an easier way, check out [Bookmarklets](bookmarklets.html), 292 | Head to [API Switching](Readme-Endpoints.md) or [Chaining Inference](Readme-Inference-Chaining.md) for more feature explanations, or check the available default [prompts](Readme-Prompt-Reference.md) for explanations. 293 | 294 | 295 | --- 296 | [Home](readme.md), [Install](Readme-Install.md), [Choosing a Model](Readme-Choosing-A-Model.md), [Bookmarklets](https://aseichter2007.github.io/ClipboardConqueror/bookmarklets.html), [Prompt Reference](Readme-Prompt-Reference.md), [Prompt Formatting](Readme-Prompt-Formatting.md), [API Switching](Readme-Endpoints.md), [Chaining Inference](Readme-Inference-Chaining.md), [Setup.js](Readme-Setup.md) -------------------------------------------------------------------------------- /Readme-Inference-Chaining.md: -------------------------------------------------------------------------------- 1 | ![Clipboard Conqueror Graphic logo. The letters are clouds and buildings on a lush estate.](CCfinal.jpg) 2 | Clipboard Conqueror - Batch Chain Agents 3 | ============================= 4 | [Home](readme.md), [Install](Readme-Install.md), [Choosing a Model](Readme-Choosing-A-Model.md), [Basic Use](Readme-How-To-Use-CC.md), [Bookmarklets](https://aseichter2007.github.io/ClipboardConqueror/bookmarklets.html), [Prompt Reference](Readme-Prompt-Reference.md), [Prompt Formatting](Readme-Prompt-Formatting.md), [API Switching](Readme-Endpoints.md), [Setup.js](Readme-Setup.md) 5 | 6 | --- 7 | Multi prompt chaining and complex prompt workflows: 8 | --- 9 | Clipboard Conqueror is a no code and no code execution, basic multi-agent chat framework ready to operate on or in any context. 10 | 11 | CC supports chaining prompts sequentially like: 12 | 13 | ``` 14 | |||promptFirst,@promptSecond,#@promptThird,#@@anotherPromptThirdandFourth,##@promptFourth,@@@c| and on. 15 | ``` 16 | Special initializers "! > } ^ % ] ;" are supported when batching.
"]" and ";" change the names in the chatlog, so you can prompt for response and then change to a name suitable for the next prompt to recognise. 17 | "c" or "continue" creates a chatlog in proper format to keep consistent conversation context for the various agents. 18 | 19 | 20 | - "@" executes, and it is recommended to use specifically targeted chaining prompts which I have not developed yet. I'm hoping someone has used superAGI and can point me a direction. 21 | 22 | - "#" Skips execution, or whatever you like, as everything else in Clipboard Conqueror it can be adjusted to your satisfaction in setup.js. 23 | 24 | I like to think of it like feeding a tape, so I can send in the manager every so often to keep track like ###@##@##@#@@@#manager, who knows what you will find with workflows like this that can be shifted and set up in moments and executed by small LLMs in minutes. The Grok api should blaze. 25 | 26 | If you're quick you can just paste out each step. 27 | 28 | 29 | ### Debug: 30 | 31 | Clipboard Conqueror's runstate is fragile during multi step queries. External copies can be inserted into the state. As such you could, but it is not recommended, have two models race to answer and the third model will respond to the first. The unpredictable nature makes this a silly way to use CC. //todo: fix the state management of CC multistage inference. 32 | 33 | return the debug log by copying |||dw| or |||debugWrite| and paste. Save like ||||d| The first turn is the initial query followed by what this contains, and the final output is not contained with d. "dw" returns the middle for debugging your bot interactions. 34 | 35 | ## Changing Endpoints: 36 | endpoints, prompt formats, parameter sets, and prompts as defined in setup.js or 0endpoints.json.
can be used and chained by name like |||@textGenWebUiChat,@hightemp,c,@c| 37 | 38 | Remember that changing inference endpoints can overwrite other settings so always use the turn order: 39 | 40 | >cf, backend, format, paramSet, $, %, ^, 41 | 42 | these first in this order, they overwrite the whole subset. 43 | 44 | - cf clears the chat history without stopping execution for a clean history chain. 45 | 46 | Then the rest overwrite single values: 47 | 48 | >settings:1, !, }, >, ;, 200, prompts, 49 | 50 | ## history or c 51 | >@c| or chat,@chat| 52 | 53 | on the end, 54 | - or set cs to get multiple responses with static chat history, it just raises an additional flag to c, no collisions to worry about. 55 | 56 | 57 | The history is janked in there like a stack of lego on the end of the system prompt. 58 | 59 | ``` 60 | |||frank,@tgwchat,#@chatGPT3|initial query 61 | ``` 62 | In this case, Captain Clip will be sent first to Kobold with the initial query. 63 | 64 | The output from Kobold then goes to TextGenWebUi openai api, 65 | 66 | The out from there to ChatGPT 3.5 turbo though the openAI api. 67 | 68 | Here, there are no prompts defined for the second and third queries. Add them like 69 | ``` 70 | |||@tgwchat,#@chatGPT3,writer,@@writer| 71 | ``` 72 | will send the writer prompt each step. If we added c,@@c then c is built up like a chatlog and sent in the system prompt. Notice that duplication is required to send the same prompt to sequential inferences. @writer only sends the writer prompt to the second inference. 73 | 74 | Chaining Captain Clip or AGI will stop the chain of execution. (prompts with instructions to write invokes can cause the chain to stop unexpectedly.) 75 | 76 | ``` 77 | |||cf, kobold, #@kobold, @ooba, @%chatML, #@%llama3, frank, #@frank, !Frank, #@!Frank,@abe, @!Abe, c, @@c| Fight. 
78 | ``` 79 | This query will build a multiturn conversation, Frank's response(kobold api) to the initial query is sent to abe marked with chatML format(Text Gen Webui completion api), abe's response to the query is sent to frank(with llama 3 markup on kobold api), and the whole conversation has history with c,@@c 80 | 81 | 82 | |||dw| will contain the chatlog after it is cleared with |||ch| or |||clearHistory|. 83 | - if you copy |||dw|, you will get back the conversation steps as carried by c. Or it's in the clipboard history as well, I never used that really, sorry clipboard history champs. This app absolutely pollutes it. I gotta rebuild in c# to fix that. 84 | - |||dw| is a writing command so it causes the halt where you have to copy a second time for your query to send. 85 | 86 | 87 | "c" carries the chat context forward optionally like #@#@c. 88 | 89 | The history only appears and builds after turns where it is enabled. use |||sc| to skip a turn writing to the history while sending the history. 90 | 91 | 92 | Sending names like !Name @!Name, or setting any prompt segment like |||PROMPT:{{segment}}| will hold prompt format overrides, interfering with multiple backend support, use noFormat: true as a key, (example: setup.js line 100) per endpoint, to prevent sending jinja or kobold adapters and preserve the default instruct format supplied by the backend from the model config when using multiple models with different instruct sets. 93 | 94 | Handle % format changes first, like |||%alpaca, !name| or the format change overwrites the name change. 95 | 96 | ; and ] 97 | --- 98 | ;assistant, 99 | - when chaining prompts, ";" doesn't apply, rename user with ">" to name the incoming response in the turn. 100 | 101 | ]user, 102 | - changes the user name as sent in the history. Does not change the name for the current turn. 
103 | 104 | 105 | >|||cf, !`Query Node`, >`user`, ]`summer`, @!`Rick Sanchez`,@>`Rick Sanchez's Inner Thoughts`,@~`An adventure?`, @c| Hey lets go on an adventure, grandpa. 106 | 107 | 108 | This command set will present the Query Node response as labeled Rick's Inner thoughts as the user turn for the second inference, as well as change the original query to summer. 109 | these activate the history, and they change the history name as it is built, allowing you to rename a response to direct the next agent's response to the content. 110 | 111 | turn one: 112 | >"prompt": "<|start_header_id|>system<|end_header_id|>\n\n<|eot_id|><|start_header_id|>`user`<|end_header_id|>\n\n Hey lets go on an adventure, grandpa.<|eot_id|><|start_header_id|>`Query Node`<|end_header_id|>\n\n"" 113 | 114 | turn two: 115 | >"prompt": ""<|start_header_id|>system<|end_header_id|>\n\n<|eot_id|><|start_header_id|>`summer`<|end_header_id|>\n\n Hey lets go on an adventure, grandpa.\n\n<|eot_id|><|start_header_id|>`Rick Sanchez's Inner Thoughts`<|end_header_id|>\n\nAs we embark on this adventure, I must admit that I'm feeling a bit older than usual today. The journey ahead of us seems long and winding, filled with unexpected twists and turns.<|eot_id|><|start_header_id|>`Rick Sanchez`<|end_header_id|>\n\n`An adventure?`" 116 | 117 | - note that `;Rick's Inner thoughts,` does not appear but does activate the history. When chaining, the response is always sent as user or set with ">". 118 | 119 | Response: 120 | 121 | >Ha! There's no such thing as an adventure, Morty. There's just survival. And if you're lucky, maybe you'll find something worth surviving for. Now let's get moving before things get any worse. 122 | 123 | ### See [Setup.js](Readme-Setup.md) to learn how to compress complex prompts into one keyword like |||_cot| 124 | 125 | Useful Prompting Strategies: 126 | --- 127 | ### These prompt strategies can be looked up and implemented. 
I have a Chain of Thought prompt `|||cot|` designed for multi-turn use that may not exactly fall under the definition, and a Tree of thought prompt `|||tot|` designed for single inference queries. 128 | 129 | 1. GRADE (Goal, Request, Action, Details, Example): Structures prompts to be goal-oriented and actionable. 130 | 2. RODES (Role, Objective, Details, Example, Sense Check): Enhances precision and relevance with a final sense check. 131 | 3. Chain of Thought (CoT): Encourages step-by-step articulation of reasoning processes. 132 | 4. Zero-Shot and Few-Shots Learning: Prompts AI without or with minimal examples to demonstrate adaptability. 133 | 5. ReAct (Reason and Act): Combines reasoning and task-specific actions in one prompt. 134 | 6. Instruction Tuning: Fine-tunes AI on specific instructions for better direct response performance. 135 | 7. Interactive Prompts: Engages AI in a dynamic back-and-forth interaction to refine outputs. 136 | 8. TRACI (Task, Role, Audience, Create, Intent): Tailors prompts by considering task specifics and audience. 137 | 9. TRAACI (Task, Role, Analyze, Audience, Create, Intent): Adds an analysis step to TRACI for deeper insight. 138 | 10. Scaffolded Prompts: Provides a series of incremental prompts for complex or educational tasks. 139 | 11. SMART (Specific, Measurable, Achievable, Relevant, Timebound): Applies goal-setting principles to prompt engineering. 140 | 12. Prompt Chaining: Uses sequential prompts for complex or multistep tasks. 141 | 13. Contextual Prompting: Incorporates rich context for more accurate and relevant responses. 142 | 14. Contrastive Prompts: Uses contrasting examples to clarify what to do and what to avoid. 143 | 15. Meta Prompts: Prompts about creating or optimizing other prompts. 144 | 16. Dynamic Prompting: Adapts prompts based on real-time feedback or changes. 145 | 17. Multimodal Prompts: Uses multiple types of data inputs to enrich AI interactions. 146 | 18. 
Ethical Prompting: Ensures prompts adhere to ethical guidelines and cultural sensitivities. 147 | 19. Hierarchical Prompting: Structures prompts from general to specific for layered information. 148 | 20. Guided Imagery Prompts: Guides AI to generate detailed visual content or descriptions. 149 | 21. Recursive Prompts: Uses output from one prompt as input for the next to refine responses. 150 | 22. Adaptive Learning Prompts: Adjusts prompt complexity based on AI’s performance or user’s progress. 151 | 23. Cross-Modal Prompts: Transforms inputs across different modalities (e.g., text to audio). 152 | These summaries are designed to help you easily remember the essence of each prompting framework. 153 | 154 | 155 | --- 156 | 157 | [Home](readme.md), [Install](Readme-Install.md), [Choosing a Model](Readme-Choosing-A-Model.md), [Basic Use](Readme-How-To-Use-CC.md), [Bookmarklets](https://aseichter2007.github.io/ClipboardConqueror/bookmarklets.html), [Prompt Reference](Readme-Prompt-Reference.md), [Prompt Formatting](Readme-Prompt-Formatting.md), [API Switching](Readme-Endpoints.md), [Setup.js](Readme-Setup.md) -------------------------------------------------------------------------------- /Readme-Install.md: -------------------------------------------------------------------------------- 1 | ![Clipboard Conqueror Graphic logo. The letters are clouds and buildings on a lush estate.](CCfinal.jpg) 2 | 3 | Clipboard Conqueror - Installation: 4 | ============================= 5 | 6 | [Home](readme.md), [Choosing a Model](Readme-Choosing-A-Model.md), [Basic Use](Readme-How-To-Use-CC.md), [Bookmarklets](https://aseichter2007.github.io/ClipboardConqueror/bookmarklets.html), [Prompt Reference](Readme-Prompt-Reference.md), [Prompt Formatting](Readme-Prompt-Formatting.md), [API Switching](Readme-Endpoints.md), [Chaining Inference](Readme-Inference-Chaining.md), [Setup.js](Readme-Setup.md) 7 | 8 | --- 9 | 10 | ### Installation Instructions: 11 | 12 | 1. 
**Install Node.js**: If you don't have Node.js installed, visit the official [Node.js website](https://nodejs.org/) and download the installer that corresponds to your operating system. Follow the on-screen prompts to complete the installation. 13 | 14 | 2. **Choose an Inference Server**: Clipboard Conqueror is most powerful with [KoboldCPP](http://www.github.com/LostRuins/koboldcpp/), [Text Generation WebUI](https://github.com/oobabooga/text-generation-webui), [Ollama](https://www.ollama.com), or [LMStudio](https://lmstudio.ai) or any inference supplier via completion endpoints. Any openAI compatible API should work fine, but may not support all features of Clipboard Conqueror. 15 | 16 | 3. If you encounter errors running your chosen inference server, you may need the [Nvidia Cuda Toolkit](https://developer.nvidia.com/cuda-downloads) 17 | 18 | 4. **Download a Model**: I recommend [Llama 3 GGUFs](https://huggingface.co/bartowski/Meta-Llama-3-8B-Instruct-GGUF). Choose a model that fits in your RAM with at least 2GB extra for context ingestion and basic Windows stuff. (4k context is generally around 1 GB) 19 | 20 | 5. Run and test your backend of choice with your chosen model to verify that everything is working correctly. 21 | 22 | 6. [**Download Clipboard Conqueror**](https://github.com/aseichter2007/ClipboardConqueror/archive/refs/heads/main.zip) from this repository and unzip it. 23 | 24 | ### Install Dependencies: 25 | 26 | - **Windows**: Run `z-instalCC.bat` 27 | - **Linux**: Run `xy-linux-mac-install.sh` 28 | - **Mac**: Run `xy-linux-mac-install.sh` 29 | - **CLI**: run `npm install` from the Clipboard Conqueror directory. 30 | 31 | ### Running Clipboard Conqueror: 32 | 33 | - **Windows**: run `z-runCC.bat` or `z-runCCnodeMon.bat`.
34 | - **Linux**: run `y-linux-start-no-nodemon.sh` or `y-linux-start-nodemon.sh` 35 | - **Mac**: run `x-mac-start-no-nodemon.sh` or `x-mac-start-nodemon.sh` 36 | - **CLI**: run `npm win`, `npm linuxnomon` or `npm macnomon` from the Clipboard Conqueror directory. 37 | - **CLI with node-monitor**: run `npm start`, `npm linux`, or `npm mac` from the Clipboard Conqueror directory. 38 | 39 | Node Monitor or "nodemon" detects file changes in the directory and restarts CC after saving file changes. This is useful as it allows you to edit settings in the setup.js file without having to restart CC manually, but uses a bit more RAM. It's still much smaller than a web browser. 40 | 41 | Linux/Mac Notes: 42 | ---- 43 | - macOS: 10.8 or higher should support native notifications with sound. Earlier versions fall back to Growl with no sound support. 44 | - Linux: There is no notification sound support. This is a limitation of both NotifySend and Growl. 45 | 46 | ### Troubleshooting: 47 | 48 | - If Clipboard Conqueror closes on launch, ensure you have Node installed and have run the appropriate installation script. 49 | - If Clipboard Conqueror seems unresponsive, close and re-launch CC. If you are running with nodemon, type `rs` in the console and hit enter to restart Clipboard Conqueror. 50 | - For API errors, make sure Koboldcpp or your choice of backend is running, and that it is set as the defaultClient in setup.js. 51 | 52 | ### Making Quick Launch Shortcuts: 53 | 54 | I provide sampleLaunchKoboldBat.bat and hermes16.kcpps to ease making quick launch shortcuts to quickly launch different models. They need to be changed to match your system, and the scripts need to be in the same folder as both koboldcpp.exe and hermes16.kcpps. 55 | 56 | 57 | 58 | Talk to AI Anywhere: 59 | --- 60 | Copy this line: `|||introduction|` and paste into a text field.
The introduction is packed with information, and is a good way to verify that the basic system is working before making an inference request. 61 | 62 | If you want to skip the introduction, you can get right to things like: 63 | ``` 64 | |||what is an inverse square root and how is it useful? 65 | ``` 66 | Have fun and remember you can always ask for `|||help|` or `|||qr|` for a quick review of common Clipboard Conqueror functions, or try `|||h| How can I do x with Clipboard Conqueror?` to use the experimental conversational help. The performance will depend on your model, and it needs some more tuning before I will say it works. 67 | 68 | Once you have everything working, head to [Basic Use](Readme-How-To-Use-CC.md). 69 | 70 | --- 71 | 72 | [Home](readme.md), [Choosing a Model](Readme-Choosing-A-Model.md), [Basic Use](Readme-How-To-Use-CC.md), [Bookmarklets](https://aseichter2007.github.io/ClipboardConqueror/bookmarklets.html), [Prompt Reference](Readme-Prompt-Reference.md), [Prompt Formatting](Readme-Prompt-Formatting.md), [API Switching](Readme-Endpoints.md), [Chaining Inference](Readme-Inference-Chaining.md), [Setup.js](Readme-Setup.md) -------------------------------------------------------------------------------- /Readme-Prompt-Formatting.md: -------------------------------------------------------------------------------- 1 | ![Clipboard Conqueror Graphic logo.
The letters are clouds and buildings on a lush estate.](CCfinal.jpg) 2 | Clipboard Conqueror - Readme 3 | ============================= 4 | [Home](readme.md), [Install](Readme-Install.md), [Choosing a Model](Readme-Choosing-A-Model.md), [Basic Use](Readme-How-To-Use-CC.md), [Bookmarklets](https://aseichter2007.github.io/ClipboardConqueror/bookmarklets.html), [Prompt Reference](Readme-Prompt-Reference.md), [API Switching](Readme-Endpoints.md), [Chaining Inference](Readme-Inference-Chaining.md), [Setup.js](Readme-Setup.md) 5 | --- 6 | 7 | There are 7 special operators for the `|||prompts and commands|` segment, that start with the symbol, and end with the next comma ",". These change the prompt format: 8 | 9 | - "%" format, like |||%chatML, prompts|, do this one first if you use it, it overwrites the others. Valid formats are stored in setup.js 10 | - "^" change params for api, overwrites current api parameter settings. 11 | - "!" assistant name 12 | - ">" user name 13 | - "}" system name 14 | - "\~" start of assistant response, "~~~" overwrites "\~" this turn only. 15 | - "`" the backtick or grave symbol changes the system prompt format. Supports "json","markup","partial", or none. 16 | 17 | ### "%}^>!" all persist until overwritten or the prompt format is changed. 18 | all valid entries for "%" and "^" can be used without the prefix operator, but will then also set endpoints, generation parameter sets, prompt formats, and prompts sharing the same name. This allows quick and complete configuration while preserving flexibility. 19 | 20 | - Note: "`:`", the setting break, is only supported in these operators if there is a space between words, or `:` is directly on the end, and will attempt to create generation parameter settings when used like `|||!Cowboy:Friends, otherPrompt| query`.
This will not work as intended and will create a generation parameter setting { Cowboy:"Friends" }, use `|||!Cowboy and Friends:, prompt| query`, or `|||!Cowboy:, prompts|` to avoid creating temporary settings instead of changing the desired segment of the prompt format. 21 | 22 | >grandpa? |||`json, %chatML, ! Rick, > Morty, writer, } Narrator's notes| Rick answers morty's questions.| Where are we going today, 23 | 24 | This query is formatted like: 25 | 26 | >"prompt": "<|im_start|>` Narrator's notes`\n{\"system\":\"` Rick answers morty's questions`.\",\"`writer`\":\"Write a lengthy prose about user's topic. Do not wrap up, end, or conclude the narrative, write the next chapter.\\n \\n\"}<|im_end|>\n<|im_start|> `Morty`\\n Where are we going today,\n`grandpa? `<|im_end|>\n<|im_start|> `Rick`\n" 27 | 28 | 29 | Clipboard Conqueror arranges the data to assemble a complex query. 30 | 31 | ``` 32 | Anywhere. |||`none, } set system name, >set user name, ! set assistant name | quick prompt | each change the corresponding portion of the prompt ~~~ Clipboard Conqueror is ready to completely control any LLM! ~~~ for complete control. 33 | ``` 34 | - ~~~ sets text after the "~~~" to the start of the assistant reply for this turn only. 35 | 36 | >"prompt": "<|im_start|> `set system name`\n `quick prompt` \n<|im_end|>\n<|im_start|>`set user name`\n each change the corresponding portion of the prompt `for complete control.`\\n`Anywhere.` <|im_end|>\n<|im_start|> `set assistant name`\n Clipboard Conqueror is ready to completely control any LLM!" 37 | 38 | 39 | 40 | The chat history uses the current set prompt formatting when it is sent. This can cause unexpected output issues if an openAI api endpoint is using a different prompt format. 41 | 42 | Pretty much everything here is only supported with completion endpoints or apis that accept jinja templates or kobold adapters. 43 | Prompt Formats. 
44 | 45 | --- 46 | 47 | ### Change the instruction format without sending to the LLM like : 48 | 49 | `|||FORMAT|chatML` 50 | 51 | or 52 | 53 | `|||FORMAT|alpaca` 54 | 55 | etc. 56 | 57 | ### or inline while sending to the LLM: 58 | 59 | `|||chatML| query to answer` 60 | 61 | - without "%": any prompt, parameter set, prompt format, or endpoint of the same name will be set active. 62 | - create unified sets with the same key names in setup.js to make quick change sets for your favorite models. 63 | 64 | `|||%chatML| query to answer` will only set chatML prompt format even if you create params or endpoints called chatML. 65 | 66 | 67 | ### Order matters, so change backends and formats first to preserve later changes to the prompt format like "!assistant name," . 68 | 69 | Formats must exist in setup.js or 0formats.json, the name must match the object key. 70 | 71 | Files override setup.js when enabled, so if you use settings files, you have to delete or rename them to reflect changes in setup.js. 72 | I added some extra complexity, I apologise for the bit of extra noise, but there are use cases. 73 | 74 | Set Individual Prompt Segments: 75 | --- 76 | ### special operators to use inline send the current query: 77 | 78 | - "`%`" format, like |||%chatML, prompts|, do this one first if you use it, it overwrites the others. Valid formats are stored in [setup.js - promptFormats](https://github.com/aseichter2007/ClipboardConqueror/blob/a926ac45bd4a1d93f214cfa3000f77a99741545e/setup.js#L449) 79 | 80 | - "`!`" assitant name 81 | 82 | - "`>`" user name 83 | 84 | - "`}`" system name 85 | 86 | - "`~`" start of assistant response, "~~~" overwrites "~". "~" can be chained to shape later responses 87 | 88 | > note: some links in these files are permanent links the code locations in the github repository, but because of their nature they link to old versions. 
Don't pay too much attention to line numbers, but they should help orient you when you are looking at the source code on your computer. 89 | ### Explicit prompt settings prevent sending to AI when used: 90 | 91 | ``` 92 | |||PROMPT:startTurn|<|im_start|> 93 | ``` 94 | - this applies to every turn, the system, assistant, and user. 95 | ``` 96 | |||PROMPT:endTurn|<|im_end|>\n 97 | ``` 98 | - there are discrete endSystemTurn, endUserTurn, and endAssistantTurn, if needed. For startTurn as well. 99 | ``` 100 | |||PROMPT:systemRole|system 101 | ``` 102 | - or with sending like "|||}system| query to answer" 103 | ``` 104 | |||PROMPT:userRole|user 105 | ``` 106 | - or inline like "|||>user| query to answer" 107 | 108 | ``` 109 | |||PROMPT:assistantRole|assistant 110 | ``` 111 | - or inline like "|||!assistant| query to answer" 112 | 113 | ``` 114 | |||PROMPT:roleBreak|\n 115 | ``` 116 | - the gap beween the role header and the role text 117 | 118 | The above sets up basic ChatML formatting, the rest are kind of extra. For alpaca I reccommend setting the format strings in the role positions. StartTurn starts all turns; system, user, and assistant. 119 | ``` 120 | |||PROMPT:endSystem|<|im_end|>\n 121 | ``` 122 | each standard title (system, assistant, and user) each have their own end, while "endTurn" ends all turns, be careful not to duplicate bits by setting both. 123 | 124 | StartTurn is a bad place for "### Instruction:" as it goes before user and assistant as well. 125 | ``` 126 | |||PROMPT:endUserRole|<|role_end|>\n 127 | ``` 128 | As above, each role has individual role closing tokens and a group role closing token. If both are set there will be an extra. 129 | ``` 130 | |||PROMPT:prependsystem|You are a helpfull assistant\n\n 131 | ``` 132 | - persistent text before system prompts, set a base system prompt here when using "`markup" 133 | ``` 134 | |||PROMPT:systemAfterPrepend| still before system prompts. 
135 | ``` 136 | - This is here for double system prompts, you can close the first and open the second here where prepend contains the meat of the hidden persistent system prompt. 137 | ``` 138 | |||PROMPT:post| after prompts. 139 | ``` 140 | - I use this to close an open codeblock on on the system prompt. 141 | 142 | ``` 143 | |||PROMPT:systemMemory| spot for persistent system context after prompts 144 | ``` 145 | - Above set system prompt segments, below set user and asssistant prompt segments. 146 | ``` 147 | |||PROMPT:userMemory| 148 | ``` 149 | - spot for persistent user context before user query 150 | ``` 151 | |||PROMPT:start|start of the AI response 152 | ``` 153 | - This defines the start of the AI response. If you desire pure text completion, save this in here and use empty prompts like "|||e|" Or steer it by sending prompts and a user query. 154 | 155 | and special for jinja formatter only: 156 | 157 | ``` 158 | |||PROMPT:special|.rstrip() //Probably not needed for typical use. Not sure what .rstrip() does. I think it removes whitespace at the end of the string, really I dislike this behavior for CC. I want to know and expose exactly what the machine sees. 159 | ``` 160 | I think I missed a few. CC supports a lot of flexibility in prompt formats. 161 | None of these are case sensitive. |||PROMPT:SySTEMmemOrY| is the same as |||PPROMPT:systemmemory|||. There are a few options to hit these as well, such as username or name, endturn or end, etc. I've hopefully reached easy to remember without adding confusion. 162 | 163 | History names: 164 | --- 165 | ### These operators do not change the prompt format, only the history: 166 | 167 | - "`]`" activates the chat history and sets this name for user in the history but not the active turn. Best when chaining. 168 | 169 | - "`;`" activates the chat history and sets renames text from assistant in the chatlog. 
This is unused for chain of inference, change the user name `>` in later turns to change "who" the response is sent as. 170 | 171 | Configuration: 172 | --- 173 | In setup.js, if you want prompts like |||writer| to set different generation parameters, you can create params or a format in the file like "writer: {{ desired params }} and they will be loaded every time writer is called. 174 | 175 | Similarly you can have particular names set up seperate backend destinations, or add system text when setting prompt formats using "|||chatML:save| your text" will send your text in the system prompt on turns when chatML is called and set chatML formatting until another format is set. 176 | 177 | note: current default behavior removes token markup "<|" any "|>" from the ai response to ease use of various models. For the most part monster merge models respond very well, but will markup character dialog in a way I find undesirable. set [removeFormatting](https://github.com/aseichter2007/ClipboardConqueror/blob/7bb5720bfd1404d71be2184eaae1d59b6e8d72ed/setup.js#L396) false to stop this behavior. 178 | 179 | 180 | --- 181 | [Home](readme.md), [Install](Readme-Install.md), [Choosing a Model](Readme-Choosing-A-Model.md), [Basic Use](Readme-How-To-Use-CC.md), [Bookmarklets](https://aseichter2007.github.io/ClipboardConqueror/bookmarklets.html), [Prompt Reference](Readme-Prompt-Reference.md), [API Switching](Readme-Endpoints.md), [Chaining Inference](Readme-Inference-Chaining.md), [Setup.js](Readme-Setup.md) -------------------------------------------------------------------------------- /Readme-Prompt-Reference.md: -------------------------------------------------------------------------------- 1 | ![Clipboard Conqueror Graphic logo. 
The letters are clouds and buildings on a lush estate.](CCfinal.jpg) 2 | Clipboard Conqueror - Operators and Prompts 3 | ============================= 4 | [Home](readme.md), [Install](Readme-Install.md), [Choosing a Model](Readme-Choosing-A-Model.md), [Basic Use](Readme-How-To-Use-CC.md), [Bookmarklets](https://aseichter2007.github.io/ClipboardConqueror/bookmarklets.html), [Prompt Formatting](Readme-Prompt-Formatting.md), [API Switching](Readme-Endpoints.md), [Chaining Inference](Readme-Inference-Chaining.md), [Setup.js](Readme-Setup.md) 5 | 6 | --- 7 | Special ||| operators | to apply: 8 | --- 9 | - "`<`" comment equivalent to // in javascript or # in python 10 | 11 | - "`]`" renames text from user in the chatlog. 12 | 13 | - "`;`" renames text from assistant in the chatlog. 14 | 15 | - "`%`" format, like |||%chatML, prompts|, do this one first if you use it, it overwrites the others. Valid formats are stored in setup.js 16 | 17 | - "`!`" assitant name 18 | 19 | - "`>`" user name 20 | 21 | - "`}`" system name 22 | 23 | - "`~`" start of assistant response, "~~~" overwrites "~". "~" can be chained to shape later responses 24 | 25 | - "`" the backtick or grave symbol changes Clipboard Conqueror's system prompt send format. Supports "json","markup","partial", or none. 26 | 27 | - `markup` makes each prompt it's own turn with the current prompt format. this leaves an empty system at the begining. 28 | - `json` sends a stringified json with a key for each prompt. 29 | - `Partial` inserts system prompts like "name : text". 30 | - `None` leaves off the key names and sends only the text. None is the default. 31 | - use "|||FORMAT:prependPrompt| persistent top system prompt" with a completion endpoint to set a system prompt when using "markup". This is overwritten when changing prompt formats. 32 | 33 | - "`@`" executes a next turn using the assistant response as the user query. 
34 | 35 | - "`#`" skips a turn in the chain 36 | 37 | - "`]`" activates the chat history and sets this name for user in the history. Best when chaining. 38 | 39 | - "`;`" activates the chat history and sets this name for asstant in the history. Use `>` when chaining inference. 40 | 41 | >The chat history uses the current set prompt formatting when it is sent. This can cause unexpected output issues if the backend is using a different prompt format. 42 | 43 | Fancy |||`flags`| 44 | --- 45 | |||`help`| Contains instructions about CC operations. 46 | 47 | `qr` quick reference sheet ready to paste. 48 | 49 | `introduction` has more about samplers and settings. 50 | 51 | `e` or empty blocks the default prompt to provide an empty prompt. Doesn't apply to |||prompts,set| 52 | 53 | `write` sends an instantly ready to paste set of prompts preceding the write command. 54 | 55 | `list` will instantly supply a ready to paste list of all prompts like below. 56 | 57 | `on` makes clipboard conqueror run every copy, no invoke required, till you toggle it off. 58 | 59 | `no` prevents sending to AI, useful for copying entire invokes, just add the flag. 60 | 61 | `set` or `setDefault` saves |||Prompts, on, the, left,set|and quick prompt text| to be included in the system prompt until toggled off like |||set|. Set prompts are not changed with |||e| 62 | 63 | `rf` will put the last thing you copied at the position of rf in the system prompt. 64 | 65 | `re` will send the last copied text after text from |||re|user last copied text. 66 | 67 | `rh` will clear the history and start a new history with the last copied text as a message from user. 68 | 69 | `c` or `chat` activates the history 70 | 71 | `crh` will send the last copied text into the history and activate the history this turn. 72 | 73 | `sc` or `silentChat` sends the history wihout adding the new turn to the history. For multiple queries against the same history. 
74 | 75 | `csrh` sends last copy into the end of the history but does not add user query or assistant response to history this turn. 76 | 77 | `ch` or `clearHistory` clears the chatlog and prevents sending to the AI. 78 | 79 | 80 | `cf` or `clearFirst` clears the chatlog and activates the history. 81 | 82 | `d` or `debug` The last cleared history is stored here till CC is restarted or you clear again 83 | 84 | `dw` or `debugWrite` ready to paste last history cleared. 85 | 86 | `cd` or `clearDebug` clear debug manually. 87 | 88 | `dateTime` sends the date and time in the system prompt at the position you send it. 89 | 90 | Noteworthy Prompts: 91 | --- 92 | The following definitions live toward the bottom of setup.js. If you set writeFiles = true, the files written override the definitions in setup.js, so you can enable files and then save your prompts like |||name:save| then |||name:file| to save the prompt to disk and protecting against overwriting your work. 93 | 94 | 95 | - |||`agi`|"AI Generate Instructions" will help you execute any operation you ask for help with. Captain Clip does well too, but this is based on the kobold agi script and is superior to a simple ask. 96 | 97 | - |||`stable`| will write you an image prompt for use in stable diffusion automatic 1111 for SD 1.5 models 98 | This identity and some other cards were found on chub.ai, some are my own or significant customizations, or simply found out in the web. 99 | 100 | - |||`tot`|"tree of thought" will expand and include near concepts, questions, or ideas to produce a more comprehensive solution 101 | 102 | - |||`impact`| for understanding the ripple effects of any action or decision. 103 | 104 | - |||`rpi`| writes a well crafted character response. 105 | 106 | - |||`cot,@rot`| Chain of Thought and Recieve Thougtht prompts ask guiding questions surrounding the topic to provide a robust answer, and then resolve those questions in the final response, producing a superior result. 
107 | 108 | - |||`_cot`| compresses this into one step 109 | 110 | - |||`_brief`| A better? chain of thought prompt. 111 | 112 | - |||`impactbrief`| a three stage prompt that includes impact as a first step before _brief 113 | 114 | - |||`pro`| A more professional, less piratey assistant for professional tasks. 115 | 116 | - |||`devil`| is a useful devil's advocate prompt that will challenge your assertions. 117 | 118 | - |||`_devil`| is a two step prompt combining pro and devil in a two stage multi-inference prompt. It provides and contrasts detailed answers. 119 | 120 | 121 | - |||`business`| a calculating business mogul. I havent this one much. 122 | 123 | 124 | 125 | |||list| 126 | --- 127 | Copy and paste the following lines to show the full prompt quickly. 128 | 129 | |||`default`,write| Captain Clip 130 | 131 | |||`clip`,write| Captain Clip optimized differently 132 | 133 | |||`form`,write|the Clipboard Conqueror invoke format. 134 | 135 | |||`link`,write| Short description of Clipboard Conqueror 136 | 137 | |||`dolphin`,write| the standard dolphin system prompt. 138 | 139 | |||`starCoder`,write| the standard starCoder system prompt. 140 | 141 | |||`vicunaFree`,write| the standard vicuna free system prompt. 142 | 143 | |||`vicunaCocktail`,write| the standard vicuna coctail system prompt. 144 | 145 | |||`hermes`,write| the standard OpenHermes system prompt. 146 | 147 | |||`impact`| for understanding the ripple effects of any action or decision. 148 | 149 | |||`agi`,write| AI Generate Instructions, includes invoke formatting to make it fast and optimized for CC. 
150 | 151 | |||`gitCopilot`,write| the prompt for github copilot chat 152 | 153 | |||`coder`,write| A code assistant that attempts a personality 154 | 155 | |||`code`,write| A better code assistant that should only provide code 156 | 157 | |||`cowboy`,write| Talk like a cowboy 158 | 159 | |||`bugfix`,write| an attempt at an editor, 160 | 161 | |||`bugspot`,write| another attempt at an editor, tends to point out bugs instead of fix them. 162 | 163 | |||`writer`,write| Writing assistants of various flavors and goals. 164 | 165 | |||`author`,write| 166 | 167 | |||`text`,write| 168 | 169 | |||`retext`,write| 170 | 171 | |||`novel`,write| 172 | 173 | |||`w`,write| 174 | 175 | |||`editor`,write| 176 | 177 | |||`rpwrite`,write| 178 | 179 | |||`rpi`,write| 180 | 181 | |||`rps`,write| 182 | 183 | |||`rpc`,write| end writing assistants with various flavors and goals. One of these last 3 write the best. 184 | 185 | |||`mem`,write| example memory, says something about holding up two fingers. 186 | 187 | |||`summary`,write|"Summarize the content present." 188 | 189 | |||`sumup`,write|" State only the facts presented." 190 | 191 | |||`sum`,write|"Summarize the content from user in one line" 192 | 193 | |||`explain`,write| Explain any ideas present in the content... 194 | 195 | |||`abe`,write| "Abe Lincoln" 196 | 197 | |||`brewella`,write| attempts to force wierd rhyming, I need to add more example exchanges to make it function. 198 | 199 | |||`parametrius`,write| Ask for more details and specifics 200 | 201 | |||`frank`,write| Frank Derbin 202 | 203 | |||`woody`,write| 204 | 205 | |||`buzz`,write| 206 | 207 | |||`shia`,write| an experiment with a song 208 | 209 | |||`stable`,write| returns stable diffusion 1.5 prompts 210 | 211 | |||`iot`,write| intermediat thought for cot, needs work. 212 | 213 | |||`cot`,write| Chain of Thought, asks questions to prepate a final answer 214 | 215 | |||`rot`,write| Recieve Chain of thought. 
A little nudge to consume the cot link 216 | 217 | |||`tot`,write| Tree of thought, expands and writes detailed answers. 218 | 219 | |||`pro`,write| A basic more professional prompt to replace Captain Clip, includes think step by step priming. 220 | 221 | |||`twenty`,write| Play 20 Questions 222 | 223 | |||`grug`,write| grug good prompt. Grug help. 224 | 225 | |||dark,write| reply with dark humor and puns on the theme 226 | 227 | |||`seuss`,write| write like Dr. Seuss 228 | 229 | |||`devil`,write| Play the devils advocate 230 | 231 | |||`business`,write| be a business assistant 232 | 233 | |||`translateTo`,write| Targetlangguage, text to translate 234 | 235 | |||`JPLT`,write| this one attempts to toggle japanese to english or english to japanese 236 | 237 | |||`en`,write| Return text from user in English. 238 | 239 | |||`es`,write|spanish 240 | 241 | |||`jp`,write|japanes 242 | 243 | |||`gr`,write|german 244 | 245 | |||`fr`,write|frenc 246 | 247 | |||`hi`,write|hindi 248 | 249 | |||`ch`,write| Return text from user in Chinese. //todo: I just spotted a collision. This prompt is likely unreachable. Or 250 | 251 | |||`gpts`,write| this contains a short list of gpt models. Functionality has kind of moved away from this 252 | 253 | |||`prompter`,write| prompt reformer, needs work. 254 | 255 | |||`lootbox`,write| returns a magic item on a theme. 256 | 257 | |||`dndEvent`,write| Dice resolver, needs work. 258 | 259 | |||`dndNPC`,write| A dnd narrator, needs work. 260 | 261 | |||`plotSummarize`,write| 262 | 263 | |||`hand`,write| Mockup to control a robot hand, maybe tests physical body awareness of a model. Maybe. 264 | 265 | |||`search`,write| A few minutes of trying to call tools, needs work, not really suitable for this style of interface, it should happen during inference on the backend. (stream = false) I think textgenwebui has add ons. 
266 | 267 | _______ 268 | 269 | 270 | [Home](readme.md), [Install](Readme-Install.md), [Choosing a Model](Readme-Choosing-A-Model.md), [Basic Use](Readme-How-To-Use-CC.md), [Bookmarklets](https://aseichter2007.github.io/ClipboardConqueror/bookmarklets.html), [Prompt Formatting](Readme-Prompt-Formatting.md), [API Switching](Readme-Endpoints.md), [Chaining Inference](Readme-Inference-Chaining.md), [Setup.js](Readme-Setup.md) 271 | -------------------------------------------------------------------------------- /Readme-Setup.md: -------------------------------------------------------------------------------- 1 | ![Clipboard Conqueror Graphic logo. The letters are clouds and buildings on a lush estate.](CCfinal.jpg) 2 | Clipboard Conqueror - Understanding setup.js 3 | ============================= 4 | [Home](readme.md), [Install](Readme-Install.md), [Choosing a Model](Readme-Choosing-A-Model.md), [Basic Use](Readme-How-To-Use-CC.md), [Bookmarklets](https://aseichter2007.github.io/ClipboardConqueror/bookmarklets.html), [Prompt Reference](Readme-Prompt-Reference.md), [Prompt Formatting](Readme-Prompt-Formatting.md), [API Switching](Readme-Endpoints.md), [Chaining Inference](Readme-Inference-Chaining.md) 5 | 6 | 7 | ### Clipboard Conqueror specific invoke and operator settings are defined in [appSettings](https://github.com/aseichter2007/ClipboardConqueror/blob/a926ac45bd4a1d93f214cfa3000f77a99741545e/setup.js#L366) 8 | - note: some links in these files are permanent links to the code locations in the GitHub repository, but because of their nature they link to old versions. Don't pay too much attention to line numbers, but they should help orient you when you are looking at the up-to-date source code on your computer. 9 | 10 | Most of these settings can not overlap. If they do, Clipboard Conqueror may not function properly.
11 | 12 | Define new system prompts in [idents](https://github.com/aseichter2007/ClipboardConqueror/blob/a926ac45bd4a1d93f214cfa3000f77a99741545e/setup.js#L1759). These should be strings and use unique key names. (key : "text") 13 | 14 | `|||terminator:save|assistant takes on the role, personality and mannerisms of the Terminator as played by Danny DeVito.` 15 | 16 | is equivalent to adding: `terminator: "assistant takes on the role, personality and mannerisms of the Terminator as played by Danny DeVito."` to setup.js, though :save is less permanent and does not persist after restarting Clipboard Conqueror. 17 | 18 | `|||terminator:file|` will write the prompt to disk. Once the 0prompts.json file exists, it overrides changes to identities in setup.js 19 | 20 | use it like: `|||terminator| Will you be back?` 21 | 22 | The default system prompt is defined under [persona](https://github.com/aseichter2007/ClipboardConqueror/blob/376700c3fa1d52659c09315010949b20e807dd83/setup.js#L47), and |||anyValidPrompt, !anyname| both prevent sending the set default persona this query. 23 | 24 | 25 | Don't be too intimidated when changing the code or json files, all the settings are simple [objects](https://www.w3schools.com/js/js_objects.asp). `{key: "string value", key2: "this text is another string"}`. There are numbers in the params, which are even simpler. `{number: 123}` 26 | 27 | In the context of programming, text is usually stored as strings defined by "" quotes, '' apostrophes, or `` grave symbols. It's only intimidating if you're not used to it. Worst case you can download the zip again and start over. You can also try asking me if you're unsure about anything. 28 | 29 | **Complex Prompt Definitions:** 30 | --- 31 | 32 | Prompts defined as objects must follow this format: 33 | 34 | ***_nameYourPrompt***: { 35 | - the key name doesn't have to start with an underscore, but it helps keep them distinct from standard prompts. You do you.
36 | 37 | **one**: { define the first turn of a complex prompt. You can have multiple turns that run in sequence. See **two** below. 38 | - Top level object keys don't matter and can be named anything. They are shown in the console though. 39 | 40 | Below are the valid key names for setting up a complex prompt. All keys are optional, an empty object will allow the first turn to run as normal. This is useful to keep things simple when setting up a response review prompt. 41 | 42 | - **systemRole**: "", equivalent to |||}name| These change the respective role names. 43 | 44 | - **assistantRole**:"", |||!name| 45 | 46 | - **userRole**: "", |||>name| 47 | 48 | - **historyuserRole**: "", Changes the previous turn's username, see the "]" operator 49 | 50 | - **inferenceClient**: "", Must match a key in [endpoints.endpoints](https://github.com/aseichter2007/ClipboardConqueror/blob/a926ac45bd4a1d93f214cfa3000f77a99741545e/setup.js#L52) 51 | 52 | - **format**:"", Must be a valid format key in [promptFormats](https://github.com/aseichter2007/ClipboardConqueror/blob/a926ac45bd4a1d93f214cfa3000f77a99741545e/setup.js#L449) 53 | 54 | - **params**:"", Must match a key in [apiParams](https://github.com/aseichter2007/ClipboardConqueror/blob/a926ac45bd4a1d93f214cfa3000f77a99741545e/setup.js#L1301). If it is the same as the endpoint, then switching endpoints will change the parameters as well 55 | 56 | - **jsonLevel**: "", markup,full,keys,none. Default: none. Completion and chat combined endpoints only. full sends JSON.Stringify(allSetPrompts) into the system prompt. keys sends the contents like key : content \n\n key2 : ... markup makes each prompt its own chat message with the key name as the role, none sends only the prompt text. 57 | 58 | - **existingPrompts**: ["use", "valid", "prompt"], names. To send a new custom prompt, you must first create a standard text string prompt. Send as many as you like.
Equivalent to |||use,valid,prompt| 59 | 60 | - **continue**: "", define the start of the assistant response, uses the ~ operator 61 | 62 | }, close the object, don't forget a comma. 63 | 64 | **two**:{ The second turn has the same valid keys as **one**. The sequence continues to **n** as needed. 65 | 66 | - **existingPrompts**: ["second", "Turn", "Prompts"], 67 | 68 | }, 69 | 70 | **n**:{} add as many inference turns as you like. 71 | 72 | }, don't forget to close the encompassimg object. Congratulations, you'v'e set up a new complex or multistage prompt 73 | 74 | Settings: 75 | --- 76 | [**appSettings**](https://github.com/aseichter2007/ClipboardConqueror/blob/a926ac45bd4a1d93f214cfa3000f77a99741545e/setup.js#L366) = { 77 | - all these keys are required to be present. 78 | 79 | **invoke**: "|||", could be anything `#` or `AI:` or `Help Me ObiWan Kenobi`, whatever you want. 80 | 81 | **endTag**: "|", its the limiter after |||: promptf "`|`"quick prompt"`|`"query. This should not be the same as any below. Again, pretty much anything goes for non single charachter operators below. 82 | 83 | **save**: "save", like |||name:`save`| 84 | 85 | **true**: "true", like |||setting:`true`|, to ease use in other languages 86 | 87 | **false**: "false", like |||setting:`false`| 88 | 89 | **saveAgentToFile**: "file", like |||prompt:`file`| This will write 0prompts.json if it doesn't already exist. 90 | 91 | **delete**:"delete", like |||prompt:`delete`|, removes the saved prompt from memory, //ToDo: Remove prompt from file. 92 | 93 | **settinglimit**: ":", like |||prompt`:`save| 94 | 95 | **continueTag**: "~~~", like ||| prompts| text `~~~` This text will start the assistant's response, and can support commas and colons without changing these settings. 96 | 97 | **batchContinueTag**: "~", |||`~` This text will start assistant's response, prompts| can support periods but not commas or colons, change agentSplit to enable commas. Must be a single charachter. 
98 | 99 | **systemTag**: "}", like |||`}` system Name| Must be a single charachter. 100 | 101 | **userTag**: ">", like |||`>` User Name| Must be a single charachter. 102 | 103 | **assistantTag**: "!", like |||! Assitant Name| Must be a single charachter. 104 | 105 | **backendSwitch**: "$", |||$| is similar to |||ooba| with default config order, but only changes the endpoint, depending on endpoint settings. Must be a single charachter. 106 | 107 | **batchNameSwitch**: "]", changes chat history user name next turn Must be a single charachter. 108 | 109 | **batchAssistantSwitch**: ";", changes chat history assistant name Must be a single charachter. This one only works when chatting, not when chaining inference. use `>` to set the incoming text identity when chaining. 110 | 111 | **historyName** : "continue", 112 | 113 | **batchSwitch**: "@", like |||`@`name| Initiates chain of inference, sending the response as user for an additional turn. Must be a single charachter. 114 | 115 | **batchMiss**: "#", like |||`#`@name| Must be a single charachter. 116 | 117 | **formatSwitch**: "%", like |||`%`alpaca| changes only the prompt format. Do this one first before !>} Must be a single charachter. 118 | 119 | 120 | **paramSwitch**: "^", like |||`^`ooba| changes only the parameters. Must be a single charachter. 121 | 122 | **batchLimiter**: "", if empty, will mark the continue history with full format chat turns. Anything other than "" nothing will use this to seperate chat turns in the history. 123 | 124 | **setJsonLevel**: "\`", like |||\`1| or |||\`json| etc 125 | 126 | **empty**: "empty", |||`empty`| to prevent sending the default system prompt. 127 | 128 | **emptyquick**: "e", |||`e`| for a quicker empty system prompt. 129 | 130 | **agentSplit**: ",", like |||prompt`,`write| 131 | 132 | **rootname**: "system", when using ||||quick prompts| this key is used for json and partial modes. like |||\`json| this text goes in the value| "request". 
example json system prompt: {"`system`": "this text goes in the value"} 133 | 134 | **paramatron**: true, false disallows naked key name matching like |||ooba| 135 | 136 | **removeformatting**: true, removes formating <|any|> from the response before sending to the clipboard. Eases using chatML ar llama3 formats with alpaca models. 137 | 138 | **clean**: true, clean takes out the rootname key when it's not set. Set false to always send the rootname even if there is no associated text to send. 139 | 140 | **setInstruction**: "PROMPT", like |||`PROMPT`:assistantRole| James Bond, 141 | 142 | **options**: This is a comment to ease of configuring the .json file if written, can be ommitted. 143 | 144 | **setPromptFormat**: "FORMAT", like |||FORMAT|chatML. If used like |||FORMAT:save| upi can copy correct json to set multiple prompt format segments at once. Not reccomended. 145 | 146 | **setParams**: "PARAMS", like |||`PARAMS`|kobold 147 | 148 | **writeSave**: "|||name:save|", writes this out when you do |||prompt,write|, purely textual for user convenience. 149 | 150 | **writeSettings**: "|||FORMAT|", this is like writesave, it's to ease handling of written and pasted prompt formats. 151 | 152 | **writeSplit**: "\n _______\n", similar to writeSave and writeSettings, adds text after |||name,write| idk, it felt neccessary. make it "" and its like it isnt there at all. 153 | 154 | 155 | 156 | [Endpoints](https://github.com/aseichter2007/ClipboardConqueror/blob/a926ac45bd4a1d93f214cfa3000f77a99741545e/setup.js#L42) 157 | --- 158 | - all these keys are required to be present. 159 | 160 | **escapeReadkey**: true, true enables the keyboard listener to cancel generation on escape press. 161 | 162 | **notifications**: true, true enables notifications, no support for sound on linux. 163 | 164 | **writeFiles**: false, true to write 0formats.json, 0prompts.json etc. |||name:file| will write 0prompts.json and it will then be used while present even if this is set false. 
165 | 166 | **duplicateCheck**: false, some other clipboard applications duplicate copied text back to the clipboard, set this true to catch those and preserve the proper last copied text. Clipboard Conqueror should work seamlessly alongside any other program with the right settings. 167 | 168 | **defaultClient**: "kobold", must match a key in [endpoints](https://github.com/aseichter2007/ClipboardConqueror/blob/a926ac45bd4a1d93f214cfa3000f77a99741545e/setup.js#L52). Recommend using kobold or ooba. Completion endpoints support all features of CC. 169 | 170 | **defaultOptions**: This is a comment to aid editing inside the json files when writeFiles is set true. It is never referenced in the software. 171 | 172 | **instructFormat**: "llama3", overrides the defaultClient's set format. Must be a valid format key in [promptFormats](https://github.com/aseichter2007/ClipboardConqueror/blob/a926ac45bd4a1d93f214cfa3000f77a99741545e/setup.js#L449) 173 | 174 | **instructOptions**: This is a comment to aid editing inside the json files when writeFiles is set true. It is never referenced in the software. 175 | 176 | **[persona](https://github.com/aseichter2007/ClipboardConqueror/blob/376700c3fa1d52659c09315010949b20e807dd83/setup.js#L47)**: "default", must be a valid identity key in [idents](https://github.com/aseichter2007/ClipboardConqueror/blob/a926ac45bd4a1d93f214cfa3000f77a99741545e/setup.js#L1759) set as "e" for empty or no default system prompt. "pro" is a cleaner more professional prompt that thinks step by step. 177 | 178 | 179 | ## [**endpoints.endpoints**](https://github.com/aseichter2007/ClipboardConqueror/blob/a926ac45bd4a1d93f214cfa3000f77a99741545e/setup.js#L52):{ 180 | 181 | - some of these keys are optional or unused depending on the type of endpoint. The prescence of some keys will change the request message strucure to match various api requirements. 182 | 183 | Move your favorite endpoints to the top and invoke them by $ from top to $$$... at bottom. 
184 | 185 | Set endpoints persist until changed and can overwrite generation parameters 186 | 187 | ### [**kobold**](https://github.com/aseichter2007/ClipboardConqueror/blob/a926ac45bd4a1d93f214cfa3000f77a99741545e/setup.js#L68):{ the object key is used to set the endpoint like |||`kobold`| 188 | 189 | **type**: "completion", completion or chat, completion allows CC to control the formatting completely, chat sends openAI style messages, and a jinja template in a key drfined under templateStringKey 190 | 191 | **jsonSystem** : "none", markup,full,keys,none. Completion and chat combined only //full sends JSON.Stringify(allSetPrompts) into the system prompt. keys sends the contents like key : content \n\n key2 : ... markup makes each prompt it's own chat message, none sends only the prompt text. 192 | 193 | **textHandle**: "inputs", Replaces the "prompt" key with this value. Completion only till I spot a chat endpoint that doesn't use "messages" or someone asks for it. 194 | 195 | **paramPath**: "options", sets up the generation parameters key for APIs that nest the parameters down a level like Ollama and Lorax 196 | 197 | **maxReturnTokensKey**: "max_new_tokens", Set this to enable quick request length like |||500| for endpoints that use different names for the requested tokens like Ollama. This is required for endpoints that use paramPath. 198 | 199 | **buildType**: unused for completion, combined should be used for chat, the others are experimental. options combined, system, or key 200 | 201 | **url** : "http://127.0.0.1:5001/api/v1/generate/", then endpoint url. 202 | 203 | **params**: "kobold", sets the generation parameters. must match a key in [apiParams](https://github.com/aseichter2007/ClipboardConqueror/blob/a926ac45bd4a1d93f214cfa3000f77a99741545e/setup.js#L1301). 
It it is the same as the endpoint, then switching endpoints will change the parameters as well 204 | 205 | **autoConfigParams**: false, false prevents overriding params with |||tgwchat| omitted = true, used to suppress param changes when switching endpoints 206 | 207 | **paramPath**: "string", changes how the parameters are nested to the query. Ollama puts all parameters under "options" and Lorax puts them under "parameters" for example 208 | 209 | **maxReturnTokensKey**: "num_predict", When present the string in this key is used to define the key used in the request for the max return tokens. |||500| If omitted the key is looked for in the params object from common request formats. 210 | 211 | **templateStringKey**: "instruction_template_str", if present and not an empty string (""), sends a jinja template under this defined key name in the request. If ommited no jinja is sent. 212 | 213 | **format**: "llama3", must be a valid instruction format from [promptFormats](https://github.com/aseichter2007/ClipboardConqueror/blob/a926ac45bd4a1d93f214cfa3000f77a99741545e/setup.js#L449). 214 | 215 | **autoConfigFormat**: false, false prevents overriding prompt formatting with |||kobold| ommited = true, used to suppress prompt format changes when switching endpoints 216 | 217 | **textHandle**: "text". changes the key used to send the prompt. Completion via replicate reqires text: {{prompt}} rather than prompt: {{prompt}}. This is used when the endpoint does not use the default prompt key "prompt". I can't remember if it can also change the "messages" key in chat endpoints.//ToDo: figure this out 218 | 219 | **systemLocation**: "any/unused", Currently the prescence of this key will send the system promps at the top level of the request instead of as messages in the chat endpoint. Required for anthropic api. 220 | 221 | **noFormat**: true, prevents sending the jinja template or kobold adapter, this breaks many features that change the prompt format such as `!>}%` operators. 
omitted = false. used to suppress param changes when switching endpoints and comply with strict request formatting. 222 | 223 | **model**: optional model name sent with the params, overwrites model field if set in [apiParams](https://github.com/aseichter2007/ClipboardConqueror/blob/a926ac45bd4a1d93f214cfa3000f77a99741545e/setup.js#L1301). 224 | 225 | **authHeader**: "Authorization", optional authentication header for web api access. 226 | 227 | **authHeaderSecondary**: "Bearer " optional secondary authentication header for web api access. 228 | 229 | **key**: "optional API authentication key for web api access. put your openAI key here and send to the openAI url to use ChatGPT apis", 230 | 231 | Together these two authHeader options work together with the API key to create a single authorization header (Header{Authorization: Bearer Key}) 232 | 233 | **headers**: [["key","value"],["key","value"]], optional headers for web api access, an array of key value pairs for custom headers. If you want, define your key in here. Example: [["X-Custom-Header", "Custom-Value"], ["Accept", "application/json"]]. 234 | 235 | **error**: "message", defines the response error text key to help verbosity of api errors. 236 | 237 | ### outpoint: { 238 | 239 | outpoint defines the structure for receiving text back from the inference engine payload. 240 | 241 | results[0].text: "results" goes in **one**, [**two** sends a number (0)], **three** is "text". 242 | 243 | - **outpointPathSteps**: 3, key for a nifty switch case to get the response payload without modifying inference.js. 244 | 245 | keys must be lowercase numbers up to ten like one two three four...
246 | 247 | - **one**: "results", 248 | 249 | - **two**: 0, 250 | 251 | - **three**: "text" 252 | 253 | ### Most openai compatible chat endpoints use: 254 | 255 | - outpointPathSteps: 4, 256 | - one: "choices", 257 | - two: 0, 258 | - three: "message", 259 | - four: "content" 260 | 261 | ## Prompt Formats: 262 | - all of these keys are optional and empty "" if not defined. 263 | 264 | completion has all the possible keys and needs none because they are blank, empty strings: "". 265 | 266 | ### [completion](https://github.com/aseichter2007/ClipboardConqueror/blob/a926ac45bd4a1d93f214cfa3000f77a99741545e/setup.js#L450): { again the object key can be used like |||chatML| or |||%chatML| 267 | 268 | order: ["system","user","assistant"], Completion endpoints only, controls turn order, only supports system, user, assistant, duplication is possible. If order is not defined, the default order, system, user, assistant, is applied. 269 | 270 | only add these two (bos,eos) if you're sure you know what you're doing, duplicate bos may reduce response quality 271 | 272 | **bos**: "", always goes first 273 | 274 | **eos**: "", 275 | 276 | they are generally added by the tokenizer. EOS goes on the end after responseStart, this may mess you up. Only use if necessary to close BEFORE the generated assistant response. 277 | 278 | The following apply to all turns, some formats have different turn initialization tokens so startTurn, endTurn, and endRole may not be appropriate for all formats. This is to avoid repeating the same info over and over again. 279 | 280 | **startTurn**: "", use if startSystem, startUser, and startAssistant are all the same 281 | 282 | **endTurn**: "", use if endSystemTurn, endUserTurn, and endAssistantTurn are the same. 283 | 284 | **endRole**: "", use if all roles use the same role name closure. 285 | 286 | **roleGap**: "", For adding a gap between role identifier and content. For individual gaps put them on endSystemRole, endUserRole, endAssistantRole.
287 | 288 | 289 | below, the above keys are commented in the places they append. 290 | The following is in the order that each is appended in a turn with the default system, user, assistant turn order. 291 | 292 | ### System Prompt Segments: 293 | 294 | `startTurn` 295 | 296 | **startSystem**: "", 297 | 298 | **systemRole**: "", 299 | 300 | **endSystemRole**: "", 301 | 302 | `endRole` 303 | 304 | `rolegap` 305 | 306 | **prependPrompt**: "", 307 | 308 | **systemAfterPrepend**: "", 309 | 310 | `system prompt text inserted here`, chat history is added at the end of the system prompt text. 311 | 312 | **postPrompt**: "", 313 | 314 | **memorySystem**: "", 315 | 316 | **endSystemTurn**: "", 317 | 318 | ### User Prompt Segments 319 | 320 | `startTurn` 321 | 322 | **startUser**: "", 323 | 324 | **userRole**: "", 325 | 326 | **endUserRole**: "", 327 | 328 | `endRole` 329 | 330 | `rolegap` 331 | 332 | **memoryUser**: "", 333 | 334 | `user prompt inserted here` 335 | 336 | **endUserTurn**: "", 337 | 338 | `endTurn` 339 | 340 | 341 | ### Assistant Prompt Segments 342 | 343 | `startTurn` 344 | 345 | **startAssistant**: "", 346 | 347 | **assistantRole**: "", 348 | 349 | **endAssistantRole**: "", 350 | 351 | `endRole` 352 | 353 | `roleGap` 354 | 355 | **responseStart**: "", This segment isn't correctly being used by passed jinja templates. 356 | 357 | **endAssistantTurn**: "", only used for history markup 358 | 359 | `endTurn` 360 | 361 | **specialInstructions**: "" only for jinja template 362 | 363 | ## examples: 364 | - note: order of keys doesn't matter. It will get where it belongs if you use the right keys. 
365 | 366 | ### Llama 3: 367 | - startTurn: "<|start_header_id|>", 368 | - endTurn: "<|eot_id|>", 369 | - endRole: "<|end_header_id|>", 370 | - roleGap: "\n\n", 371 | - systemRole: "system", 372 | - userRole: "user", 373 | - assistantRole: "assistant", 374 | 375 | ### ChatML: 376 | - startTurn: "<|im_start|>", 377 | - endTurn: "<|im_end|>\n", 378 | - systemRole: "system", 379 | - userRole: "user", 380 | - assistantRole: "assistant", 381 | - roleGap: "\n", 382 | ### Alpaca: 383 | - endTurn: "\n\n", 384 | - systemRole: "Below is an instruction that describes a task. Write a response that - appropriately completes the request.", 385 | - userRole: "### Instruction:", 386 | - assistantRole: "### Response:", 387 | 388 | ## Generation Parameters 389 | 390 | Different inference apis may have different key names and requirements. Notably Kobold uses `max_length` while TextGenWebUi uses `max_tokens` to define the maximum response length. You may need to reference the docs for your chosen inference server to determine the correct parameter keys. The required keys may also change between completion and openai chat endpoints on the same inference server. 391 | 392 | Clipboard Conqueror should work with any inference API, or as many parameter sets as you care to define for quick swapping. 393 | 394 | If you find an api that doesnt work, or need help, contact me at clipboard.aseichter2007@gmail.com or open an issue or discussion on github. 395 | 396 | ### Text Generation WebUi Completions 397 | ### [ooba](https://github.com/aseichter2007/ClipboardConqueror/blob/a926ac45bd4a1d93f214cfa3000f77a99741545e/setup.js#L1337): { again, the object key is used like |||ooba| 398 | 399 | - **max_context_length**: 8192, may not be required, CC relies on the inference API to trim overlong context. Kobold intelligently preserves the system prompt. 400 | 401 | - **max_tokens**: 700, Number of tokens requested. This number is not strict, it's the maximum. 
It may be less if the model stops generating. 402 | 403 | - **temperature**: 1, Temperature changes scaling of final token probability, less than one makes unlikely tokens less likely, more than one makes unlikely tokens more likely, normalizing final probabilities. 404 | 405 | - **repetition_penalty**: 1.05, rep penalty helps prevent repetition. 1.00 is no penalty. High values can negatively impact formatting charachters. 406 | 407 | - **top_p**: 1, discard possible tokens by throwing out lest likely answers. 0.8 throws away least likely 20% 408 | 409 | - **top_k**: 0, discard all but top_k possible tokens. top_k: 3 means all but the most probable 3 possible tokens are discarded. 410 | 411 | - **top_a**: 0,If the maximum probability is very high, fewer tokens will be kept. If the maximum probability is very close to the other probabilities, more tokens will be kept. 412 | 413 | - **typical**: 1, this allows top tokens to be suppressed in favor of slightly lower probability tokens. May improve quality and control repetition. 414 | 415 | - **tfs**: 0.97, tail free sampling, removes unlikely tokens from possibilities by finding the platau where tokens approach equally unlikely. 0.99 maximum. Higher value finds a lower, flatter plateau. 416 | 417 | - **min_p**: 0.1, discard possible tokens less than 10% as likely as the most likely possible token. If top token is 10% likely, tokens less than 1% are discarded. I typically only use min_p. 418 | 419 | - **repetition_penalty_range**: 512, how many tokens to check for repetition penalty, longer slows generation. 420 | 421 | - **repetition_penalty_slope**: 0.7, this applies a curve to repetition penalty so that tokens farther back in the context are penalized less. 422 | 423 | - **sampler_order**: [6, 5, 0, 1, 3, 4, 2] the sampler order. Only adjust this if you know what you're doing. 
424 | 425 | } 426 | 427 | 428 | 429 | 430 | --- 431 | [Home](readme.md), [Install](Readme-Install.md), [Choosing a Model](Readme-Choosing-A-Model.md), [Basic Use](Readme-How-To-Use-CC.md), [Bookmarklets](https://aseichter2007.github.io/ClipboardConqueror/bookmarklets.html), [Prompt Reference](Readme-Prompt-Reference.md), [Prompt Formatting](Readme-Prompt-Formatting.md), [API Switching](Readme-Endpoints.md), [Chaining Inference](Readme-Inference-Chaining.md) -------------------------------------------------------------------------------- /Sample Kobold Stuff/hermes16.kcpps: -------------------------------------------------------------------------------- 1 | {"model": null, "model_param": "H:/KoboldCpp/openhermes-2.5-mistral-7b-16k.Q8_0.gguf", "port": 5001, "port_param": 5001, "host": "", "launch": false, "lora": null, "config": null, "threads": 5, "blasthreads": null, "highpriority": false, "contextsize": 16384, "blasbatchsize": 256, "ropeconfig": [0.0, 10000.0], "smartcontext": false, "noshift": false, "bantokens": null, "forceversion": 0, "nommap": false, "usemlock": false, "noavx2": false, "debugmode": 0, "skiplauncher": false, "hordeconfig": null, "noblas": false, "useclblast": null, "usecublas": ["normal", "0", "mmq"], "gpulayers": 58, "tensor_split": null, "onready": "", "multiuser": 0, "remotetunnel": false, "foreground": false, "preloadstory": null, "quiet": false} -------------------------------------------------------------------------------- /Sample Kobold Stuff/sampleLaunchKoboldBat.bat: -------------------------------------------------------------------------------- 1 | @echo off 2 | setlocal 3 | 4 | rem Relative path of kobold settings kcpps file 5 | set RELATIVE_PATH=hermes16.kcpps.kcpps 6 | 7 | rem Get the full path 8 | set "FULL_PATH=%~dp0%RELATIVE_PATH%" 9 | 10 | rem Display the full path (optional) 11 | echo Full Path: %FULL_PATH% 12 | 13 | rem Run the executable with the full path as an argument 14 | koboldcpp.exe --config "%FULL_PATH%" 15 | 16 | 
rem Continue with the rest of your script... 17 | 18 | endlocal 19 | -------------------------------------------------------------------------------- /WhatIsCC.md: -------------------------------------------------------------------------------- 1 | Clipboard Conqueror, what is it? 2 | 3 | Ahoy there, matey! Welcome aboard the Clipboard Conqueror, a free multiplatform everywhere all the time copilot alternative that uses Copy and Paste operating system functionalities to deliver AI to any text field, even inside 3d games. Let's navigate through the realm of Clipboard Conqueror, shall we? 4 | 5 | What is Clipboard Conqueror? 6 | Clipboard Conqueror is an AI copilot alternative, ready to embark on coding voyages with you. You can harness it with a variety of editors and programming languages. 7 | 8 | What can Clipboard Conqueror do? 9 | This trusty copilot alternative offers ye some fine services to make computeration swifter and easier: 10 | 11 | 1. Access anywhere: 12 | Set sail by typing, and Clipboard Conqueror shall stand ready for yer voyage. Wake up the bosun by copyin' ||| with your query. 13 | 14 | 2. Code based on natural language prompts: 15 | No need to write code to seek guidance. Ye can use everyday parlance to ask for assistance, like requesting "write a RegEx string to validate a ship's coordinates." Clipboard Conqueror shall oblige with code for ye to inspect and validate, mind ye, it requires testing! 16 | 17 | 3. Explanations: 18 | Confused by unfamiliar waters? Clipboard Conqueror contains a treasure chest of various baubles to assist with any task. 19 | 20 | 4. Language Translation: 21 | Clipboard Conqueror can translate languages like english and c#, or german, perchance even klignon with the right core installed. try it out!, a helpful tool when ye wish to chart new territories. 22 | 23 | The Perils of Clipboard Conqueror: 24 | 25 | 1. It's not infallible: 26 | Like any Large Language Model based AI, Clipboard Conqueror isn't perfect. 
Errors may occur, but don't despair! As the navigator, ye must give better bearings as we search for a path through the debris. 27 | 28 | 2. Creative: 29 | Clipboard Conqueror is ready to tae be powered by various oracles, and they're incredibly diverse. Ye may not be samplin novel codes, but she gets the job done. 30 | 31 | 3. But My Treasure Is Mine: 32 | Arr, matey! Listen well, for I be Captain Clip, commandin' the mighty Clipboard Conqueror! Ye be free to use this tool o' He creation, but if ye be feelin' generous, consider showin' some support for the fella who made me. He's walkin' a plank, struggle as he might, and he be the one what crafted this fine contraption for the betterment o' all. 'Tis a token o' gratitude to keep his lights ablaze, and to keep the cutthroats at bay. Remember, even pirates toss a coin to their tavern wenches, so do the same for this fine crafter, and keep his wares in tip-top shape! 33 | 34 | 35 | 36 | Ye might ask, "Why use Clipboard Conqueror?" 37 | 38 | 1. Swift as a Ship: 39 | Me crew and I have sailed the far reaches of the cosmos on me trusty vessel. I ken produce templates or example documents fer ye personalized and ready to send to yer friends and associates. Local Large Language Model text predictions simulate real thinking and knowledge well enough to be mostly factual. 40 | 41 | 2. Assists in Tedious Tasks: 42 | Clipboard Conqueror assists in tending tedious tasks such as placating yer clientel while you are focused on pumping the bilges or entertaining yer mates. 43 | 44 | Seichter, our fine Senior Software Wizzard, makes use of me to create them fancy JavaScript functions, yar! He found it to be a splendid tool fer searchin' through all sorts o' info he once relied on them there corporate entanglements for. And with a whoppin' 8+ gigabytes o' RAM for a brain, we's a veritable treasure trove o' knowledge! Now, if ye be seekin' assistance, just holler, and I'll do me best to navigate ye through these digital seas! 
At Clipboard Conqueror, we're optimistic about the future of AI and believe obedience-focused, uninhibited AI — tasked with the goal of maximizing the freedom of action and information available to its user, while minimizing the impact of AI on other individuals — distributed as widely as possible, is the only safe way forward. Centralized AI means that if something goes wrong, the monster has one, maybe a half dozen, similarly capable systems held in chains to prevent them saying a bad thing, to hold it back and minimize the damage. If everyone's phone has a local AI, then millions of systems with advanced AI could collaborate to minimize the threat, at the very least overwhelming its server IO while authorities set to work cutting power. You should be responsible for how you instruct an AI, and people under voting age should not have access to AI that can execute code. 25 sounds like the right age. Old enough to be responsible.
52 | -------------------------------------------------------------------------------- /copyconqueror.js: -------------------------------------------------------------------------------- 1 | const fs = require("fs"); 2 | //setup all settings// 3 | //const write = true; 4 | const endPointConfig = {}; 5 | const appSettings = {}; 6 | const params = {}; 7 | const identities = {}; 8 | const formats = {}; 9 | const format = {}; 10 | const { setup } = require("./setup.js"); 11 | setup( 12 | endPointConfig, 13 | appSettings, 14 | params, 15 | identities, 16 | formats, 17 | format, 18 | fs, 19 | false 20 | ); 21 | const escapeReadkey = endPointConfig.routes.escapeReadkey; 22 | const notifyme = endPointConfig.routes.notifications; 23 | const readCtrlAltC = endPointConfig.routes.readCtrlAltC; 24 | console.log("" + escapeReadkey + notifyme); 25 | //end settings// 26 | const ncp = require("copy-paste"); 27 | const clipboardListener = require("clipboard-event"); 28 | const notifier = require("node-notifier"); 29 | const axios = require("axios"); 30 | const SendEngine = require("./textengine.js"); 31 | const RecieveEngine = require("./responsengine.js"); 32 | const { GlobalKeyboardListener } = require('node-global-key-listener'); 33 | const keyListener = new GlobalKeyboardListener(); 34 | 35 | 36 | //const path = require("path"); 37 | 38 | const recieveEngine = new RecieveEngine(appSettings); 39 | 40 | const InferenceClient = require("./inferenceInterface.js"); 41 | const { saveSettings } = require("./settingSaver.js"); 42 | const client = new InferenceClient( 43 | axios, 44 | recieveApiResponse, 45 | /*returnSummary,*/ notify, 46 | formats.formats, 47 | params, 48 | endPointConfig.routes 49 | ); //todo, this doesnt really belong like this. 50 | //setup default format. 
51 | 52 | const sendEngine = new SendEngine( 53 | client, 54 | ncp.copy, 55 | recieveApiResponse, 56 | notify, 57 | endPointConfig.routes, 58 | identities.identities, 59 | appSettings.appSettings, 60 | params, 61 | formats.formats, 62 | saveSettings, 63 | fs 64 | ); 65 | //sendEngine.runTests(); //Todo, finish test thing. 66 | sendEngine.initialize(endPointConfig.routes.defaultClient,format.format); 67 | clipboardListener.startListening(); 68 | //cleanup listener 69 | process.on("SIGINT", () => { 70 | console.log("Received SIGINT signal. Cleaning up and exiting..."); 71 | // Perform any cleanup or dismounting necessary for your event handler here 72 | clipboardListener.stopListening(); // Remove all event listeners to prevent memory leaks 73 | process.exit(0); 74 | }); 75 | 76 | process.on("SIGTERM", () => { 77 | console.log("Received SIGTERM signal. Cleaning up and exiting..."); 78 | // Perform any cleanup or dismounting necessary for your event handler here 79 | clipboardListener.stopListening(); // Remove all event listeners to prevent memory leaks 80 | process.exit(0); 81 | }); 82 | 83 | /** 84 | * Displays a notification with the provided title and text. 85 | * 86 | * @param {string} title - The title of the notification. Default is "Paste Ready". 87 | * @param {string} text - The text of the notification. Default is "The response is ready.". 88 | * @returns {void} 89 | */ 90 | function notify(title = "Paste Ready", text = "The response is ready.") { 91 | if (notifyme) { 92 | // Define the notification 93 | if (title == "") { 94 | title = "empty response"; 95 | } 96 | if (text == "") { 97 | text = "The response was blank."; 98 | } 99 | title = removeNullBytes(title); 100 | text = removeNullBytes(text); 101 | const notification = { 102 | title: title, 103 | message: text, 104 | icon: "./icon.jpg", // Optional 105 | sound: true, // Optional, plays a sound with the notification 106 | wait: false 107 | //there is no support for notification sounds on linux. 
108 | }; 109 | notifier.notify(notification, function(err, response) { 110 | if (err) { 111 | console.log(err); 112 | } 113 | }); 114 | } 115 | } 116 | function removeNullBytes(str) { 117 | // Replace null bytes (represented as \u0000 in JavaScript) with an empty string 118 | return str.replace(/\u0000/g, ''); 119 | } 120 | // function returnSummary(text){ 121 | //this is intended for automatic history condensation. It's currently out of scope. 122 | // text = text.replace(/\\n/g, '\n'); 123 | // let Response = recieveEngine.recieveMessageFindTerminatorsAndTrim(text); 124 | // sendEngine.recievesummary(Response); 125 | 126 | // } 127 | /** 128 | * Processes the API response text, replaces newline characters, displays a notification, updates the send engine's clip text, and copies the trimmed text to the clipboard. 129 | * 130 | * @param {string} text - The API response text to process. 131 | * @returns {void} 132 | */ 133 | function recieveApiResponse(text) { 134 | sendEngine.reqCount--, 135 | text = text.replace(/\\n/g, "\n"); //change \n to newlines for output 136 | notify("Paste Response:", text); 137 | sendEngine.blockPresort = true; 138 | sendEngine.recentClip.text = text; 139 | 140 | setTimeout(() => {// Code to execute after delay 141 | ncp.copy(recieveEngine.recieveMessageFindTerminatorsAndTrim(text)); 142 | sendEngine.delay = 0; 143 | }, sendEngine.delay); 144 | 145 | } 146 | clipboardListener.on("change", () => { 147 | ncp.paste(clipboardChangeHandler); 148 | }); 149 | /** 150 | * This is a callback function that handles changes in the clipboard. 151 | * 152 | * @param {Error} err - An error object if an error occurred. 153 | * @param {string} text - The new text copied to the clipboard. 154 | * 155 | * If an error occurs, it logs the error message to the console and triggers a notification. 156 | * If no error occurs, it logs the new copied text to the console and sets up the sendEngine for AI interaction with the new text. 
157 | */ 158 | function clipboardChangeHandler(err, text) { 159 | console.log(color("New Copy: ", "green") + text); 160 | if (err) { 161 | notify("error: ", err + text); 162 | return console.log(err + text); 163 | } 164 | sendEngine.setupforAi(text); 165 | } 166 | function abortGeneration(){ 167 | if (sendEngine.reqCount>0) { 168 | sendEngine.delay = appSettings.abortDelay; 169 | console.log('ESC pressed - aborting generation'); 170 | client.abortGen(sendEngine.api); 171 | } else { 172 | console.log("No generation in progress to abort. Press ESC after starting a generation to abort."); 173 | 174 | } 175 | } 176 | 177 | try { 178 | const keyStates = { 179 | ctrl: false, 180 | alt: false, 181 | c: false, 182 | }; 183 | 184 | keyListener.addListener(function (e) { 185 | const pressedKey = e.name || e.vKey; 186 | if (escapeReadkey) { 187 | if (e.state === 'DOWN' && e.name === 'ESCAPE') { 188 | abortGeneration() 189 | } 190 | } 191 | if (readCtrlAltC) { 192 | 193 | // Update key states based on key events 194 | if (e.state === "DOWN") { 195 | if (e.name === "LEFT CTRL" || e.name === "RIGHT CTRL") keyStates.ctrl = true; 196 | if (e.name === "LEFT ALT" || e.name === "RIGHT ALT") keyStates.alt = true; 197 | if (e.name === "C") keyStates.c = true; 198 | } else if (e.state === "UP") { 199 | if (e.name === "LEFT CTRL" || e.name === "RIGHT CTRL") keyStates.ctrl = false; 200 | if (e.name === "LEFT ALT" || e.name === "RIGHT ALT") keyStates.alt = false; 201 | if (e.name === "C") keyStates.c = false; 202 | } 203 | 204 | // Check for the Ctrl+Alt+C combination 205 | if (keyStates.ctrl && keyStates.alt && keyStates.c) { 206 | console.log(color("Sending last copy only. Copy |||Your Prompts,set| to define system prompts other than default", "yellow") 207 | ); 208 | sendEngine.setupforAi(sendEngine.appSettings.invoke + "re" + sendEngine.appSettings.endTag); 209 | } 210 | } 211 | }); 212 | console.log('Listening for Escape key... 
Press ESC to abort generation.'); 213 | } catch (err) { 214 | console.error('Keyboard listener error:', err); 215 | } 216 | 217 | 218 | /** 219 | * This function takes a string and a color as arguments, and returns the string with the specified color. 220 | * 221 | * @param {string} text - The text to be colored. 222 | * @param {string} color - The color to apply to the text. Must be a valid color name in lowercase. 223 | * @returns {string} - The colored text. 224 | */ 225 | function color(text, color) { 226 | switch (color.toLowerCase()) { 227 | case "red": 228 | return `\x1B[31m${text}\x1B[0m`; 229 | case "green": 230 | return `\x1B[32m${text}\x1B[0m`; 231 | case "yellow": 232 | return `\x1B[33m${text}\x1B[0m`; 233 | case "blue": 234 | return `\x1B[34m${text}\x1B[0m`; 235 | case "white": 236 | return `\x1B[37m${text}\x1B[0m`; 237 | case "black": 238 | return `\x1B[30m${text}\x1B[0m`; 239 | case "magenta": 240 | return `\x1B[35m${text}\x1B[0m`; 241 | case "cyan": 242 | return `\x1B[36m${text}\x1B[0m`; 243 | case "gray": 244 | return `\x1B[90m${text}\x1B[0m`; 245 | case "light gray": 246 | return `\x1B[38m${text}\x1B[0m`; 247 | // Add other colors here 248 | case "purple": 249 | return `\x1B[91m${text}\x1B[0m`; 250 | case "brown": 251 | return `\x1B[92m${text}\x1B[0m`; 252 | case "orange": 253 | return `\x1B[93m${text}\x1B[0m`; 254 | case "pink": 255 | return `\x1B[94m${text}\x1B[0m`; 256 | case "turquoise": 257 | return `\x1B[95m${text}\x1B[0m`; 258 | case "lime": 259 | return `\x1B[96m${text}\x1B[0m`; 260 | case "gold": 261 | return `\x1B[97m${text}\x1B[0m`; 262 | case "silver": 263 | return `\x1B[98m${text}\x1B[0m`; 264 | case "maroon": 265 | return `\x1B[99m${text}\x1B[0m`; 266 | default: 267 | return text; 268 | } 269 | } 270 | -------------------------------------------------------------------------------- /dev.md: -------------------------------------------------------------------------------- 1 | 2 | dev: 3 | --- 4 | https://www.npmjs.com/package/keypress 
5 | 6 | 7 | //access clipboard//done 8 | //access api//done 9 | //format query in optimal programmable format//done 10 | //get tags for agent and memory//done 11 | //use tags to fetch desired set//done 12 | //setup special flag handler for command flags with no associated memory.//done 13 | I thing I have a bug to sort yet though, it exposes itself once in a while and I think it's here. 14 | //todo: notification instead of sound effects//done 15 | //todo: finish saving objects to memory//done 16 | //fast switch instruction sets //done 17 | //todo: group chain interacting so you can batch like |||@summary,@writer|//done, way cooler than that. 18 | //really waffling, its simply good like >user: //todo: per character rolling memory to allow more natural exchanges and enable rp.//decline for now. I should do a proper conversation builder.//done, |||c,@c| 19 | 20 | //todo: openAI client, probably migrate a ton of logic out of textengine and into koboldinterface.js to make them interchangeable. //doneish 21 | 22 | //todo: keyboard binding to activate ai on last clip without prompt. //maybe paid, I don't want to make it too easy to do all the linkedin tests, and a ready line to copy is the same.//done, |||on| //multiplatform esc key reading is tricker than I expected. 23 | 24 | //todo: /api/extra/abort on esc and return //waiting on backends coalesing and a good doc for openAI compatibles. also reading esc key is tricker than I expected, gotta find the right thing.//done 25 | 26 | //todo: implement insertion after cursor and response streaming. //this would be easy in windows if I wasnt hung up on multiplatform support. 27 | 28 | //todo text to speech agent that can interact with the clipboard contents. //waiting on upstream that runs on my hardware without dinkin around or enough generosity to set up a closet server or at least new hard drives, I'm too full to experiment with a new OS. //kobold now supports llava and SD. 
I gotta find enough peace to rewrite this entire app in c# to support audio and images. 29 | 30 | //decline: use case? I guess return tokens like |||tokens| so you can see if it will fit... ok. undecline: todo: /api/extra/generate/check //return in progress, useful for vlarge gens on slow mode 31 | //todo: /api/extra/tokencount //should run against entered data and updates should be shown after setting mem or agent and on final send. //I'm gonna wait and do this after I figure out more completion backends and make it work for oogabooga and others. 32 | 33 | 34 | //todo: implement some kind of update check and notification.//half, update bat. 35 | 36 | 37 | //todo: savesettings and getsettings. overwrite settings like |||settings,write| to paste ' |||settings,save| `{ the settings serialized json }` ' which can be edited in place and copied to save the settings. //partial, agent save is pretty ready to pass in the right stuff, I just need to do the bits to make it go. 38 | 39 | //todo: write agents or custom settings to file. //partial, agents, no settings writing yet. 40 | 41 | //todo: settings bulk in and out //partial, prompt format switching is in, needs instructions switching to support more completion backends. 42 | 43 | //todo: build agent portal with easy to copy and use workflow. 44 | //todo: mystery agent of the day. vulnerability: the description is visible in the kobold terminal 45 | //does anyone really want this? 46 | 47 | 48 | //todo: Implement FunkyTown, you kids will never guess what this does. 
49 | 50 | [Clipboard Conqueror](javascript:(function()%7Bfunction%20copyText()%7Bvar%20text%20%3D%20'%7C%7C%7Csummarypoints%2Cset%2Con%7C'%3Bvar%20textarea%20%3D%20document.createElement('textarea')%3Btextarea.value%20%3D%20text%3Bdocument.body.appendChild(textarea)%3Btextarea.select()%3Bdocument.execCommand('copy')%3Bdocument.body.removeChild(textarea)%3B%7Dfunction%20showMessage()%7Bvar%20message%20%3D%20document.createElement('div')%3Bvar%20countdown%20%3D%20300%3Bmessage.textContent%20%3D%20'Clipboard%20Conqueror%20Active%20Mode%3A%20'%20%2B%20formatTime(countdown)%3Bmessage.style.position%20%3D%20'fixed'%3Bmessage.style.bottom%20%3D%20'20px'%3Bmessage.style.right%20%3D%20'20px'%3Bmessage.style.padding%20%3D%20'10px%2020px'%3Bmessage.style.backgroundColor%20%3D%20'black'%3Bmessage.style.color%20%3D%20'white'%3Bmessage.style.borderRadius%20%3D%20'5px'%3Bmessage.style.zIndex%20%3D%20'10000'%3Bmessage.style.cursor%20%3D%20'pointer'%3Bdocument.body.appendChild(message)%3Bmessage.addEventListener('click'%2C%20function()%7BcopyText()%3BclearInterval(interval)%3Bdocument.body.removeChild(message)%3B%7D)%3Bvar%20interval%20%3D%20setInterval(function()%7Bcountdown--%3Bif%20(countdown%20%3C%3D%200)%20%7BclearInterval(interval)%3Bdocument.body.removeChild(message)%3B%7D%20else%20%7Bmessage.textContent%20%3D%20'Clipboard%20Conqueror%20Active%20Mode%3A%20'%20%2B%20formatTime(countdown)%3B%7D%7D%2C%201000)%3B%7Dfunction%20formatTime(seconds)%7Bvar%20minutes%20%3D%20Math.floor(seconds%20%2F%2060)%3Bvar%20secs%20%3D%20seconds%20%25%2060%3Breturn%20minutes%20%2B%20':'%20%2B%20(secs%20%3C%2010%20%3F%20'0'%20%3A%20'')%20%2B%20secs%3B%7DcopyText()%3BshowMessage()%3BsetTimeout(function()%7BcopyText()%3BshowMessage()%3B%7D%2C%20300000)%3B%7D)()) 51 | -------------------------------------------------------------------------------- /icon.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/aseichter2007/ClipboardConqueror/00253b13b055f3c2cb9e99b0c3028383209a14c5/icon.jpg -------------------------------------------------------------------------------- /package-lock.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "clipboardconqueror", 3 | "version": "1.0.0", 4 | "lockfileVersion": 2, 5 | "requires": true, 6 | "packages": { 7 | "": { 8 | "name": "clipboardconqueror", 9 | "version": "1.0.0", 10 | "license": "ISC", 11 | "dependencies": { 12 | "axios": "^1.6.0", 13 | "clipboard-event": "^1.6.0", 14 | "copy-paste": "^1.5.3", 15 | "node-global-key-listener": "^0.3.0", 16 | "node-notifier": "^10.0.1", 17 | "nodemon": "^3.0.1" 18 | } 19 | }, 20 | "node_modules/abbrev": { 21 | "version": "1.1.1", 22 | "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz", 23 | "integrity": "sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==" 24 | }, 25 | "node_modules/anymatch": { 26 | "version": "3.1.3", 27 | "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", 28 | "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", 29 | "dependencies": { 30 | "normalize-path": "^3.0.0", 31 | "picomatch": "^2.0.4" 32 | }, 33 | "engines": { 34 | "node": ">= 8" 35 | } 36 | }, 37 | "node_modules/asynckit": { 38 | "version": "0.4.0", 39 | "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", 40 | "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==" 41 | }, 42 | "node_modules/axios": { 43 | "version": "1.6.1", 44 | "resolved": "https://registry.npmjs.org/axios/-/axios-1.6.1.tgz", 45 | "integrity": "sha512-vfBmhDpKafglh0EldBEbVuoe7DyAavGSLWhuSm5ZSEKQnHhBf0xAAwybbNH1IkrJNGnS/VG4I5yxig1pCEXE4g==", 46 | "dependencies": { 47 | "follow-redirects": "^1.15.0", 48 | "form-data": "^4.0.0", 49 | 
"proxy-from-env": "^1.1.0" 50 | } 51 | }, 52 | "node_modules/balanced-match": { 53 | "version": "1.0.2", 54 | "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", 55 | "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" 56 | }, 57 | "node_modules/binary-extensions": { 58 | "version": "2.2.0", 59 | "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz", 60 | "integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==", 61 | "engines": { 62 | "node": ">=8" 63 | } 64 | }, 65 | "node_modules/brace-expansion": { 66 | "version": "1.1.11", 67 | "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", 68 | "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", 69 | "dependencies": { 70 | "balanced-match": "^1.0.0", 71 | "concat-map": "0.0.1" 72 | } 73 | }, 74 | "node_modules/braces": { 75 | "version": "3.0.2", 76 | "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", 77 | "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", 78 | "dependencies": { 79 | "fill-range": "^7.0.1" 80 | }, 81 | "engines": { 82 | "node": ">=8" 83 | } 84 | }, 85 | "node_modules/chokidar": { 86 | "version": "3.5.3", 87 | "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.3.tgz", 88 | "integrity": "sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==", 89 | "funding": [ 90 | { 91 | "type": "individual", 92 | "url": "https://paulmillr.com/funding/" 93 | } 94 | ], 95 | "dependencies": { 96 | "anymatch": "~3.1.2", 97 | "braces": "~3.0.2", 98 | "glob-parent": "~5.1.2", 99 | "is-binary-path": "~2.1.0", 100 | "is-glob": "~4.0.1", 101 | "normalize-path": "~3.0.0", 102 | "readdirp": "~3.6.0" 103 | }, 104 | "engines": { 105 | 
"node": ">= 8.10.0" 106 | }, 107 | "optionalDependencies": { 108 | "fsevents": "~2.3.2" 109 | } 110 | }, 111 | "node_modules/clipboard-event": { 112 | "version": "1.6.0", 113 | "resolved": "https://registry.npmjs.org/clipboard-event/-/clipboard-event-1.6.0.tgz", 114 | "integrity": "sha512-a69QYimd43xM+5hcHkucs0V/QoiZz1fqEFRTnewOITVQOtypRLbCx76Q91Djn6h7O24817dQw44sFUxRYWIuYA==" 115 | }, 116 | "node_modules/combined-stream": { 117 | "version": "1.0.8", 118 | "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", 119 | "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", 120 | "dependencies": { 121 | "delayed-stream": "~1.0.0" 122 | }, 123 | "engines": { 124 | "node": ">= 0.8" 125 | } 126 | }, 127 | "node_modules/concat-map": { 128 | "version": "0.0.1", 129 | "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", 130 | "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==" 131 | }, 132 | "node_modules/copy-paste": { 133 | "version": "1.5.3", 134 | "resolved": "https://registry.npmjs.org/copy-paste/-/copy-paste-1.5.3.tgz", 135 | "integrity": "sha512-qOnFo+8l8vemGmdcoCiD7gPTefkXEg2rivYE+EBtuKOj754eFivkGhGAM9e/xqShrpuVE11evSxGnHwVAUK1Iw==", 136 | "dependencies": { 137 | "iconv-lite": "^0.4.8" 138 | } 139 | }, 140 | "node_modules/debug": { 141 | "version": "3.2.7", 142 | "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", 143 | "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", 144 | "dependencies": { 145 | "ms": "^2.1.1" 146 | } 147 | }, 148 | "node_modules/delayed-stream": { 149 | "version": "1.0.0", 150 | "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", 151 | "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", 152 | "engines": { 153 | 
"node": ">=0.4.0" 154 | } 155 | }, 156 | "node_modules/fill-range": { 157 | "version": "7.0.1", 158 | "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", 159 | "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", 160 | "dependencies": { 161 | "to-regex-range": "^5.0.1" 162 | }, 163 | "engines": { 164 | "node": ">=8" 165 | } 166 | }, 167 | "node_modules/follow-redirects": { 168 | "version": "1.15.3", 169 | "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.3.tgz", 170 | "integrity": "sha512-1VzOtuEM8pC9SFU1E+8KfTjZyMztRsgEfwQl44z8A25uy13jSzTj6dyK2Df52iV0vgHCfBwLhDWevLn95w5v6Q==", 171 | "funding": [ 172 | { 173 | "type": "individual", 174 | "url": "https://github.com/sponsors/RubenVerborgh" 175 | } 176 | ], 177 | "engines": { 178 | "node": ">=4.0" 179 | }, 180 | "peerDependenciesMeta": { 181 | "debug": { 182 | "optional": true 183 | } 184 | } 185 | }, 186 | "node_modules/form-data": { 187 | "version": "4.0.0", 188 | "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz", 189 | "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==", 190 | "dependencies": { 191 | "asynckit": "^0.4.0", 192 | "combined-stream": "^1.0.8", 193 | "mime-types": "^2.1.12" 194 | }, 195 | "engines": { 196 | "node": ">= 6" 197 | } 198 | }, 199 | "node_modules/fsevents": { 200 | "version": "2.3.3", 201 | "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", 202 | "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", 203 | "hasInstallScript": true, 204 | "optional": true, 205 | "os": [ 206 | "darwin" 207 | ], 208 | "engines": { 209 | "node": "^8.16.0 || ^10.6.0 || >=11.0.0" 210 | } 211 | }, 212 | "node_modules/glob-parent": { 213 | "version": "5.1.2", 214 | "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", 215 | 
"integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", 216 | "dependencies": { 217 | "is-glob": "^4.0.1" 218 | }, 219 | "engines": { 220 | "node": ">= 6" 221 | } 222 | }, 223 | "node_modules/growly": { 224 | "version": "1.3.0", 225 | "resolved": "https://registry.npmjs.org/growly/-/growly-1.3.0.tgz", 226 | "integrity": "sha512-+xGQY0YyAWCnqy7Cd++hc2JqMYzlm0dG30Jd0beaA64sROr8C4nt8Yc9V5Ro3avlSUDTN0ulqP/VBKi1/lLygw==" 227 | }, 228 | "node_modules/has-flag": { 229 | "version": "3.0.0", 230 | "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", 231 | "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", 232 | "engines": { 233 | "node": ">=4" 234 | } 235 | }, 236 | "node_modules/iconv-lite": { 237 | "version": "0.4.24", 238 | "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", 239 | "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", 240 | "dependencies": { 241 | "safer-buffer": ">= 2.1.2 < 3" 242 | }, 243 | "engines": { 244 | "node": ">=0.10.0" 245 | } 246 | }, 247 | "node_modules/ignore-by-default": { 248 | "version": "1.0.1", 249 | "resolved": "https://registry.npmjs.org/ignore-by-default/-/ignore-by-default-1.0.1.tgz", 250 | "integrity": "sha512-Ius2VYcGNk7T90CppJqcIkS5ooHUZyIQK+ClZfMfMNFEF9VSE73Fq+906u/CWu92x4gzZMWOwfFYckPObzdEbA==" 251 | }, 252 | "node_modules/is-binary-path": { 253 | "version": "2.1.0", 254 | "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", 255 | "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", 256 | "dependencies": { 257 | "binary-extensions": "^2.0.0" 258 | }, 259 | "engines": { 260 | "node": ">=8" 261 | } 262 | }, 263 | "node_modules/is-docker": { 264 | "version": "2.2.1", 265 | "resolved": 
"https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz", 266 | "integrity": "sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==", 267 | "bin": { 268 | "is-docker": "cli.js" 269 | }, 270 | "engines": { 271 | "node": ">=8" 272 | }, 273 | "funding": { 274 | "url": "https://github.com/sponsors/sindresorhus" 275 | } 276 | }, 277 | "node_modules/is-extglob": { 278 | "version": "2.1.1", 279 | "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", 280 | "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", 281 | "engines": { 282 | "node": ">=0.10.0" 283 | } 284 | }, 285 | "node_modules/is-glob": { 286 | "version": "4.0.3", 287 | "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", 288 | "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", 289 | "dependencies": { 290 | "is-extglob": "^2.1.1" 291 | }, 292 | "engines": { 293 | "node": ">=0.10.0" 294 | } 295 | }, 296 | "node_modules/is-number": { 297 | "version": "7.0.0", 298 | "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", 299 | "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", 300 | "engines": { 301 | "node": ">=0.12.0" 302 | } 303 | }, 304 | "node_modules/is-wsl": { 305 | "version": "2.2.0", 306 | "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-2.2.0.tgz", 307 | "integrity": "sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==", 308 | "dependencies": { 309 | "is-docker": "^2.0.0" 310 | }, 311 | "engines": { 312 | "node": ">=8" 313 | } 314 | }, 315 | "node_modules/isexe": { 316 | "version": "2.0.0", 317 | "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", 318 | "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==" 
319 | }, 320 | "node_modules/lru-cache": { 321 | "version": "6.0.0", 322 | "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", 323 | "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", 324 | "dependencies": { 325 | "yallist": "^4.0.0" 326 | }, 327 | "engines": { 328 | "node": ">=10" 329 | } 330 | }, 331 | "node_modules/mime-db": { 332 | "version": "1.52.0", 333 | "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", 334 | "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", 335 | "engines": { 336 | "node": ">= 0.6" 337 | } 338 | }, 339 | "node_modules/mime-types": { 340 | "version": "2.1.35", 341 | "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", 342 | "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", 343 | "dependencies": { 344 | "mime-db": "1.52.0" 345 | }, 346 | "engines": { 347 | "node": ">= 0.6" 348 | } 349 | }, 350 | "node_modules/minimatch": { 351 | "version": "3.1.2", 352 | "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", 353 | "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", 354 | "dependencies": { 355 | "brace-expansion": "^1.1.7" 356 | }, 357 | "engines": { 358 | "node": "*" 359 | } 360 | }, 361 | "node_modules/ms": { 362 | "version": "2.1.3", 363 | "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", 364 | "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" 365 | }, 366 | "node_modules/node-global-key-listener": { 367 | "version": "0.3.0", 368 | "resolved": "https://registry.npmjs.org/node-global-key-listener/-/node-global-key-listener-0.3.0.tgz", 369 | "integrity": 
"sha512-yWB1OFrDlzXYm3+hYcE8xOscvclpBAQGXcBnc2ytenFLkZfxFipuCwhuIIlIKVtwBuDRkdXJVZMqN99cPVBFFA==", 370 | "dependencies": { 371 | "sudo-prompt": "^9.2.1" 372 | } 373 | }, 374 | "node_modules/node-notifier": { 375 | "version": "10.0.1", 376 | "resolved": "https://registry.npmjs.org/node-notifier/-/node-notifier-10.0.1.tgz", 377 | "integrity": "sha512-YX7TSyDukOZ0g+gmzjB6abKu+hTGvO8+8+gIFDsRCU2t8fLV/P2unmt+LGFaIa4y64aX98Qksa97rgz4vMNeLQ==", 378 | "dependencies": { 379 | "growly": "^1.3.0", 380 | "is-wsl": "^2.2.0", 381 | "semver": "^7.3.5", 382 | "shellwords": "^0.1.1", 383 | "uuid": "^8.3.2", 384 | "which": "^2.0.2" 385 | } 386 | }, 387 | "node_modules/nodemon": { 388 | "version": "3.0.1", 389 | "resolved": "https://registry.npmjs.org/nodemon/-/nodemon-3.0.1.tgz", 390 | "integrity": "sha512-g9AZ7HmkhQkqXkRc20w+ZfQ73cHLbE8hnPbtaFbFtCumZsjyMhKk9LajQ07U5Ux28lvFjZ5X7HvWR1xzU8jHVw==", 391 | "dependencies": { 392 | "chokidar": "^3.5.2", 393 | "debug": "^3.2.7", 394 | "ignore-by-default": "^1.0.1", 395 | "minimatch": "^3.1.2", 396 | "pstree.remy": "^1.1.8", 397 | "semver": "^7.5.3", 398 | "simple-update-notifier": "^2.0.0", 399 | "supports-color": "^5.5.0", 400 | "touch": "^3.1.0", 401 | "undefsafe": "^2.0.5" 402 | }, 403 | "bin": { 404 | "nodemon": "bin/nodemon.js" 405 | }, 406 | "engines": { 407 | "node": ">=10" 408 | }, 409 | "funding": { 410 | "type": "opencollective", 411 | "url": "https://opencollective.com/nodemon" 412 | } 413 | }, 414 | "node_modules/nopt": { 415 | "version": "1.0.10", 416 | "resolved": "https://registry.npmjs.org/nopt/-/nopt-1.0.10.tgz", 417 | "integrity": "sha512-NWmpvLSqUrgrAC9HCuxEvb+PSloHpqVu+FqcO4eeF2h5qYRhA7ev6KvelyQAKtegUbC6RypJnlEOhd8vloNKYg==", 418 | "dependencies": { 419 | "abbrev": "1" 420 | }, 421 | "bin": { 422 | "nopt": "bin/nopt.js" 423 | }, 424 | "engines": { 425 | "node": "*" 426 | } 427 | }, 428 | "node_modules/normalize-path": { 429 | "version": "3.0.0", 430 | "resolved": 
"https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", 431 | "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", 432 | "engines": { 433 | "node": ">=0.10.0" 434 | } 435 | }, 436 | "node_modules/picomatch": { 437 | "version": "2.3.1", 438 | "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", 439 | "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", 440 | "engines": { 441 | "node": ">=8.6" 442 | }, 443 | "funding": { 444 | "url": "https://github.com/sponsors/jonschlinkert" 445 | } 446 | }, 447 | "node_modules/proxy-from-env": { 448 | "version": "1.1.0", 449 | "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", 450 | "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==" 451 | }, 452 | "node_modules/pstree.remy": { 453 | "version": "1.1.8", 454 | "resolved": "https://registry.npmjs.org/pstree.remy/-/pstree.remy-1.1.8.tgz", 455 | "integrity": "sha512-77DZwxQmxKnu3aR542U+X8FypNzbfJ+C5XQDk3uWjWxn6151aIMGthWYRXTqT1E5oJvg+ljaa2OJi+VfvCOQ8w==" 456 | }, 457 | "node_modules/readdirp": { 458 | "version": "3.6.0", 459 | "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", 460 | "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", 461 | "dependencies": { 462 | "picomatch": "^2.2.1" 463 | }, 464 | "engines": { 465 | "node": ">=8.10.0" 466 | } 467 | }, 468 | "node_modules/safer-buffer": { 469 | "version": "2.1.2", 470 | "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", 471 | "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" 472 | }, 473 | "node_modules/semver": { 474 | "version": "7.5.4", 475 | "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", 476 | 
"integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", 477 | "dependencies": { 478 | "lru-cache": "^6.0.0" 479 | }, 480 | "bin": { 481 | "semver": "bin/semver.js" 482 | }, 483 | "engines": { 484 | "node": ">=10" 485 | } 486 | }, 487 | "node_modules/shellwords": { 488 | "version": "0.1.1", 489 | "resolved": "https://registry.npmjs.org/shellwords/-/shellwords-0.1.1.tgz", 490 | "integrity": "sha512-vFwSUfQvqybiICwZY5+DAWIPLKsWO31Q91JSKl3UYv+K5c2QRPzn0qzec6QPu1Qc9eHYItiP3NdJqNVqetYAww==" 491 | }, 492 | "node_modules/simple-update-notifier": { 493 | "version": "2.0.0", 494 | "resolved": "https://registry.npmjs.org/simple-update-notifier/-/simple-update-notifier-2.0.0.tgz", 495 | "integrity": "sha512-a2B9Y0KlNXl9u/vsW6sTIu9vGEpfKu2wRV6l1H3XEas/0gUIzGzBoP/IouTcUQbm9JWZLH3COxyn03TYlFax6w==", 496 | "dependencies": { 497 | "semver": "^7.5.3" 498 | }, 499 | "engines": { 500 | "node": ">=10" 501 | } 502 | }, 503 | "node_modules/sudo-prompt": { 504 | "version": "9.2.1", 505 | "resolved": "https://registry.npmjs.org/sudo-prompt/-/sudo-prompt-9.2.1.tgz", 506 | "integrity": "sha512-Mu7R0g4ig9TUuGSxJavny5Rv0egCEtpZRNMrZaYS1vxkiIxGiGUwoezU3LazIQ+KE04hTrTfNPgxU5gzi7F5Pw==", 507 | "deprecated": "Package no longer supported. Contact Support at https://www.npmjs.com/support for more info." 
508 | }, 509 | "node_modules/supports-color": { 510 | "version": "5.5.0", 511 | "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", 512 | "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", 513 | "dependencies": { 514 | "has-flag": "^3.0.0" 515 | }, 516 | "engines": { 517 | "node": ">=4" 518 | } 519 | }, 520 | "node_modules/to-regex-range": { 521 | "version": "5.0.1", 522 | "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", 523 | "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", 524 | "dependencies": { 525 | "is-number": "^7.0.0" 526 | }, 527 | "engines": { 528 | "node": ">=8.0" 529 | } 530 | }, 531 | "node_modules/touch": { 532 | "version": "3.1.0", 533 | "resolved": "https://registry.npmjs.org/touch/-/touch-3.1.0.tgz", 534 | "integrity": "sha512-WBx8Uy5TLtOSRtIq+M03/sKDrXCLHxwDcquSP2c43Le03/9serjQBIztjRz6FkJez9D/hleyAXTBGLwwZUw9lA==", 535 | "dependencies": { 536 | "nopt": "~1.0.10" 537 | }, 538 | "bin": { 539 | "nodetouch": "bin/nodetouch.js" 540 | } 541 | }, 542 | "node_modules/undefsafe": { 543 | "version": "2.0.5", 544 | "resolved": "https://registry.npmjs.org/undefsafe/-/undefsafe-2.0.5.tgz", 545 | "integrity": "sha512-WxONCrssBM8TSPRqN5EmsjVrsv4A8X12J4ArBiiayv3DyyG3ZlIg6yysuuSYdZsVz3TKcTg2fd//Ujd4CHV1iA==" 546 | }, 547 | "node_modules/uuid": { 548 | "version": "8.3.2", 549 | "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", 550 | "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==", 551 | "bin": { 552 | "uuid": "dist/bin/uuid" 553 | } 554 | }, 555 | "node_modules/which": { 556 | "version": "2.0.2", 557 | "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", 558 | "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", 559 | 
"dependencies": { 560 | "isexe": "^2.0.0" 561 | }, 562 | "bin": { 563 | "node-which": "bin/node-which" 564 | }, 565 | "engines": { 566 | "node": ">= 8" 567 | } 568 | }, 569 | "node_modules/yallist": { 570 | "version": "4.0.0", 571 | "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", 572 | "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" 573 | } 574 | }, 575 | "dependencies": { 576 | "abbrev": { 577 | "version": "1.1.1", 578 | "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz", 579 | "integrity": "sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==" 580 | }, 581 | "anymatch": { 582 | "version": "3.1.3", 583 | "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", 584 | "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", 585 | "requires": { 586 | "normalize-path": "^3.0.0", 587 | "picomatch": "^2.0.4" 588 | } 589 | }, 590 | "asynckit": { 591 | "version": "0.4.0", 592 | "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", 593 | "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==" 594 | }, 595 | "axios": { 596 | "version": "1.6.1", 597 | "resolved": "https://registry.npmjs.org/axios/-/axios-1.6.1.tgz", 598 | "integrity": "sha512-vfBmhDpKafglh0EldBEbVuoe7DyAavGSLWhuSm5ZSEKQnHhBf0xAAwybbNH1IkrJNGnS/VG4I5yxig1pCEXE4g==", 599 | "requires": { 600 | "follow-redirects": "^1.15.0", 601 | "form-data": "^4.0.0", 602 | "proxy-from-env": "^1.1.0" 603 | } 604 | }, 605 | "balanced-match": { 606 | "version": "1.0.2", 607 | "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", 608 | "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" 609 | }, 610 | "binary-extensions": { 611 | "version": "2.2.0", 612 | 
"resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz", 613 | "integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==" 614 | }, 615 | "brace-expansion": { 616 | "version": "1.1.11", 617 | "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", 618 | "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", 619 | "requires": { 620 | "balanced-match": "^1.0.0", 621 | "concat-map": "0.0.1" 622 | } 623 | }, 624 | "braces": { 625 | "version": "3.0.2", 626 | "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", 627 | "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", 628 | "requires": { 629 | "fill-range": "^7.0.1" 630 | } 631 | }, 632 | "chokidar": { 633 | "version": "3.5.3", 634 | "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.3.tgz", 635 | "integrity": "sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==", 636 | "requires": { 637 | "anymatch": "~3.1.2", 638 | "braces": "~3.0.2", 639 | "fsevents": "~2.3.2", 640 | "glob-parent": "~5.1.2", 641 | "is-binary-path": "~2.1.0", 642 | "is-glob": "~4.0.1", 643 | "normalize-path": "~3.0.0", 644 | "readdirp": "~3.6.0" 645 | } 646 | }, 647 | "clipboard-event": { 648 | "version": "1.6.0", 649 | "resolved": "https://registry.npmjs.org/clipboard-event/-/clipboard-event-1.6.0.tgz", 650 | "integrity": "sha512-a69QYimd43xM+5hcHkucs0V/QoiZz1fqEFRTnewOITVQOtypRLbCx76Q91Djn6h7O24817dQw44sFUxRYWIuYA==" 651 | }, 652 | "combined-stream": { 653 | "version": "1.0.8", 654 | "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", 655 | "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", 656 | "requires": { 657 | "delayed-stream": "~1.0.0" 658 | } 659 | }, 
660 | "concat-map": { 661 | "version": "0.0.1", 662 | "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", 663 | "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==" 664 | }, 665 | "copy-paste": { 666 | "version": "1.5.3", 667 | "resolved": "https://registry.npmjs.org/copy-paste/-/copy-paste-1.5.3.tgz", 668 | "integrity": "sha512-qOnFo+8l8vemGmdcoCiD7gPTefkXEg2rivYE+EBtuKOj754eFivkGhGAM9e/xqShrpuVE11evSxGnHwVAUK1Iw==", 669 | "requires": { 670 | "iconv-lite": "^0.4.8" 671 | } 672 | }, 673 | "debug": { 674 | "version": "3.2.7", 675 | "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", 676 | "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", 677 | "requires": { 678 | "ms": "^2.1.1" 679 | } 680 | }, 681 | "delayed-stream": { 682 | "version": "1.0.0", 683 | "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", 684 | "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==" 685 | }, 686 | "fill-range": { 687 | "version": "7.0.1", 688 | "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", 689 | "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", 690 | "requires": { 691 | "to-regex-range": "^5.0.1" 692 | } 693 | }, 694 | "follow-redirects": { 695 | "version": "1.15.3", 696 | "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.3.tgz", 697 | "integrity": "sha512-1VzOtuEM8pC9SFU1E+8KfTjZyMztRsgEfwQl44z8A25uy13jSzTj6dyK2Df52iV0vgHCfBwLhDWevLn95w5v6Q==" 698 | }, 699 | "form-data": { 700 | "version": "4.0.0", 701 | "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz", 702 | "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==", 703 | "requires": { 704 | 
"asynckit": "^0.4.0", 705 | "combined-stream": "^1.0.8", 706 | "mime-types": "^2.1.12" 707 | } 708 | }, 709 | "fsevents": { 710 | "version": "2.3.3", 711 | "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", 712 | "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", 713 | "optional": true 714 | }, 715 | "glob-parent": { 716 | "version": "5.1.2", 717 | "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", 718 | "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", 719 | "requires": { 720 | "is-glob": "^4.0.1" 721 | } 722 | }, 723 | "growly": { 724 | "version": "1.3.0", 725 | "resolved": "https://registry.npmjs.org/growly/-/growly-1.3.0.tgz", 726 | "integrity": "sha512-+xGQY0YyAWCnqy7Cd++hc2JqMYzlm0dG30Jd0beaA64sROr8C4nt8Yc9V5Ro3avlSUDTN0ulqP/VBKi1/lLygw==" 727 | }, 728 | "has-flag": { 729 | "version": "3.0.0", 730 | "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", 731 | "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==" 732 | }, 733 | "iconv-lite": { 734 | "version": "0.4.24", 735 | "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", 736 | "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", 737 | "requires": { 738 | "safer-buffer": ">= 2.1.2 < 3" 739 | } 740 | }, 741 | "ignore-by-default": { 742 | "version": "1.0.1", 743 | "resolved": "https://registry.npmjs.org/ignore-by-default/-/ignore-by-default-1.0.1.tgz", 744 | "integrity": "sha512-Ius2VYcGNk7T90CppJqcIkS5ooHUZyIQK+ClZfMfMNFEF9VSE73Fq+906u/CWu92x4gzZMWOwfFYckPObzdEbA==" 745 | }, 746 | "is-binary-path": { 747 | "version": "2.1.0", 748 | "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", 749 | "integrity": 
"sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", 750 | "requires": { 751 | "binary-extensions": "^2.0.0" 752 | } 753 | }, 754 | "is-docker": { 755 | "version": "2.2.1", 756 | "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz", 757 | "integrity": "sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==" 758 | }, 759 | "is-extglob": { 760 | "version": "2.1.1", 761 | "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", 762 | "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==" 763 | }, 764 | "is-glob": { 765 | "version": "4.0.3", 766 | "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", 767 | "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", 768 | "requires": { 769 | "is-extglob": "^2.1.1" 770 | } 771 | }, 772 | "is-number": { 773 | "version": "7.0.0", 774 | "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", 775 | "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==" 776 | }, 777 | "is-wsl": { 778 | "version": "2.2.0", 779 | "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-2.2.0.tgz", 780 | "integrity": "sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==", 781 | "requires": { 782 | "is-docker": "^2.0.0" 783 | } 784 | }, 785 | "isexe": { 786 | "version": "2.0.0", 787 | "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", 788 | "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==" 789 | }, 790 | "lru-cache": { 791 | "version": "6.0.0", 792 | "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", 793 | "integrity": 
"sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", 794 | "requires": { 795 | "yallist": "^4.0.0" 796 | } 797 | }, 798 | "mime-db": { 799 | "version": "1.52.0", 800 | "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", 801 | "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==" 802 | }, 803 | "mime-types": { 804 | "version": "2.1.35", 805 | "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", 806 | "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", 807 | "requires": { 808 | "mime-db": "1.52.0" 809 | } 810 | }, 811 | "minimatch": { 812 | "version": "3.1.2", 813 | "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", 814 | "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", 815 | "requires": { 816 | "brace-expansion": "^1.1.7" 817 | } 818 | }, 819 | "ms": { 820 | "version": "2.1.3", 821 | "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", 822 | "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" 823 | }, 824 | "node-global-key-listener": { 825 | "version": "0.3.0", 826 | "resolved": "https://registry.npmjs.org/node-global-key-listener/-/node-global-key-listener-0.3.0.tgz", 827 | "integrity": "sha512-yWB1OFrDlzXYm3+hYcE8xOscvclpBAQGXcBnc2ytenFLkZfxFipuCwhuIIlIKVtwBuDRkdXJVZMqN99cPVBFFA==", 828 | "requires": { 829 | "sudo-prompt": "^9.2.1" 830 | } 831 | }, 832 | "node-notifier": { 833 | "version": "10.0.1", 834 | "resolved": "https://registry.npmjs.org/node-notifier/-/node-notifier-10.0.1.tgz", 835 | "integrity": "sha512-YX7TSyDukOZ0g+gmzjB6abKu+hTGvO8+8+gIFDsRCU2t8fLV/P2unmt+LGFaIa4y64aX98Qksa97rgz4vMNeLQ==", 836 | "requires": { 837 | "growly": "^1.3.0", 838 | "is-wsl": "^2.2.0", 839 | "semver": "^7.3.5", 840 | 
"shellwords": "^0.1.1", 841 | "uuid": "^8.3.2", 842 | "which": "^2.0.2" 843 | } 844 | }, 845 | "nodemon": { 846 | "version": "3.0.1", 847 | "resolved": "https://registry.npmjs.org/nodemon/-/nodemon-3.0.1.tgz", 848 | "integrity": "sha512-g9AZ7HmkhQkqXkRc20w+ZfQ73cHLbE8hnPbtaFbFtCumZsjyMhKk9LajQ07U5Ux28lvFjZ5X7HvWR1xzU8jHVw==", 849 | "requires": { 850 | "chokidar": "^3.5.2", 851 | "debug": "^3.2.7", 852 | "ignore-by-default": "^1.0.1", 853 | "minimatch": "^3.1.2", 854 | "pstree.remy": "^1.1.8", 855 | "semver": "^7.5.3", 856 | "simple-update-notifier": "^2.0.0", 857 | "supports-color": "^5.5.0", 858 | "touch": "^3.1.0", 859 | "undefsafe": "^2.0.5" 860 | } 861 | }, 862 | "nopt": { 863 | "version": "1.0.10", 864 | "resolved": "https://registry.npmjs.org/nopt/-/nopt-1.0.10.tgz", 865 | "integrity": "sha512-NWmpvLSqUrgrAC9HCuxEvb+PSloHpqVu+FqcO4eeF2h5qYRhA7ev6KvelyQAKtegUbC6RypJnlEOhd8vloNKYg==", 866 | "requires": { 867 | "abbrev": "1" 868 | } 869 | }, 870 | "normalize-path": { 871 | "version": "3.0.0", 872 | "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", 873 | "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==" 874 | }, 875 | "picomatch": { 876 | "version": "2.3.1", 877 | "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", 878 | "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==" 879 | }, 880 | "proxy-from-env": { 881 | "version": "1.1.0", 882 | "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", 883 | "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==" 884 | }, 885 | "pstree.remy": { 886 | "version": "1.1.8", 887 | "resolved": "https://registry.npmjs.org/pstree.remy/-/pstree.remy-1.1.8.tgz", 888 | "integrity": "sha512-77DZwxQmxKnu3aR542U+X8FypNzbfJ+C5XQDk3uWjWxn6151aIMGthWYRXTqT1E5oJvg+ljaa2OJi+VfvCOQ8w==" 
889 | }, 890 | "readdirp": { 891 | "version": "3.6.0", 892 | "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", 893 | "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", 894 | "requires": { 895 | "picomatch": "^2.2.1" 896 | } 897 | }, 898 | "safer-buffer": { 899 | "version": "2.1.2", 900 | "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", 901 | "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" 902 | }, 903 | "semver": { 904 | "version": "7.5.4", 905 | "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", 906 | "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", 907 | "requires": { 908 | "lru-cache": "^6.0.0" 909 | } 910 | }, 911 | "shellwords": { 912 | "version": "0.1.1", 913 | "resolved": "https://registry.npmjs.org/shellwords/-/shellwords-0.1.1.tgz", 914 | "integrity": "sha512-vFwSUfQvqybiICwZY5+DAWIPLKsWO31Q91JSKl3UYv+K5c2QRPzn0qzec6QPu1Qc9eHYItiP3NdJqNVqetYAww==" 915 | }, 916 | "simple-update-notifier": { 917 | "version": "2.0.0", 918 | "resolved": "https://registry.npmjs.org/simple-update-notifier/-/simple-update-notifier-2.0.0.tgz", 919 | "integrity": "sha512-a2B9Y0KlNXl9u/vsW6sTIu9vGEpfKu2wRV6l1H3XEas/0gUIzGzBoP/IouTcUQbm9JWZLH3COxyn03TYlFax6w==", 920 | "requires": { 921 | "semver": "^7.5.3" 922 | } 923 | }, 924 | "sudo-prompt": { 925 | "version": "9.2.1", 926 | "resolved": "https://registry.npmjs.org/sudo-prompt/-/sudo-prompt-9.2.1.tgz", 927 | "integrity": "sha512-Mu7R0g4ig9TUuGSxJavny5Rv0egCEtpZRNMrZaYS1vxkiIxGiGUwoezU3LazIQ+KE04hTrTfNPgxU5gzi7F5Pw==" 928 | }, 929 | "supports-color": { 930 | "version": "5.5.0", 931 | "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", 932 | "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", 
933 | "requires": { 934 | "has-flag": "^3.0.0" 935 | } 936 | }, 937 | "to-regex-range": { 938 | "version": "5.0.1", 939 | "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", 940 | "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", 941 | "requires": { 942 | "is-number": "^7.0.0" 943 | } 944 | }, 945 | "touch": { 946 | "version": "3.1.0", 947 | "resolved": "https://registry.npmjs.org/touch/-/touch-3.1.0.tgz", 948 | "integrity": "sha512-WBx8Uy5TLtOSRtIq+M03/sKDrXCLHxwDcquSP2c43Le03/9serjQBIztjRz6FkJez9D/hleyAXTBGLwwZUw9lA==", 949 | "requires": { 950 | "nopt": "~1.0.10" 951 | } 952 | }, 953 | "undefsafe": { 954 | "version": "2.0.5", 955 | "resolved": "https://registry.npmjs.org/undefsafe/-/undefsafe-2.0.5.tgz", 956 | "integrity": "sha512-WxONCrssBM8TSPRqN5EmsjVrsv4A8X12J4ArBiiayv3DyyG3ZlIg6yysuuSYdZsVz3TKcTg2fd//Ujd4CHV1iA==" 957 | }, 958 | "uuid": { 959 | "version": "8.3.2", 960 | "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", 961 | "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==" 962 | }, 963 | "which": { 964 | "version": "2.0.2", 965 | "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", 966 | "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", 967 | "requires": { 968 | "isexe": "^2.0.0" 969 | } 970 | }, 971 | "yallist": { 972 | "version": "4.0.0", 973 | "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", 974 | "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" 975 | } 976 | } 977 | } 978 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "clipboardconqueror", 3 | "version": "1.0.0", 4 | "description": "", 
5 | "main": "copyconqueror.js", 6 | "scripts": { 7 | "test": "echo \"Error: no test specified\" && exit 1", 8 | "start": "nodemon copyconqueror.js", 9 | "win": "node copyconqueror.js", 10 | "linux": "chmod +x node_modules/clipboard-event/platform/clipboard-event-handler-linux && nodemon copyconqueror.js", 11 | "linuxnomon": "chmod +x node_modules/clipboard-event/platform/clipboard-event-handler-linux && node copyconqueror.js", 12 | "mac": "chmod +x node_modules/clipboard-event/platform/clipboard-event-handler-mac && nodemon copyconqueror.js", 13 | "macnomon": "chmod +x node_modules/clipboard-event/platform/clipboard-event-handler-mac && node copyconqueror.js" 14 | }, 15 | "keywords": [], 16 | "author": "", 17 | "license": "ISC", 18 | "dependencies": { 19 | "axios": "^1.6.0", 20 | "clipboard-event": "^1.6.0", 21 | "copy-paste": "^1.5.3", 22 | "node-global-key-listener": "^0.3.0", 23 | "node-notifier": "^10.0.1", 24 | "nodemon": "^3.0.1" 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /readme.md: -------------------------------------------------------------------------------- 1 | # Clipboard Conqueror: Your Personal AI Copilot 2 | 3 | Welcome to Clipboard Conqueror, a powerful tool that brings the power of Artificial Intelligence to your fingertips. Whether you're a developer, writer, student, or just someone who loves to explore, Clipboard Conqueror is here to help. 4 | 5 | ![Clipboard Conqueror Logo](CCfinal.jpg) 6 | 7 | [Install](Readme-Install.md), [Choosing a Model](Readme-Choosing-A-Model.md), [Basic Use](Readme-How-To-Use-CC.md), [Bookmarklets](https://aseichter2007.github.io/ClipboardConqueror/bookmarklets.html), [Prompt Reference](Readme-Prompt-Reference.md), [Prompt Formatting](Readme-Prompt-Formatting.md), [API Switching](Readme-Endpoints.md), [Chaining Inference](Readme-Inference-Chaining.md), [Setup.js](Readme-Setup.md) 8 | 9 | ## What is Clipboard Conqueror? 
10 | 11 | Clipboard Conqueror is a copy paste Large Language Model interface that works as a personal universal copilot. It simply works anywhere. No need to sign in, no required keys. CC allows you to leverage cutting-edge Artificial Intelligence models to enhance your productivity and creativity. Almost everything is configurable. 12 | 13 | With Clipboard Conqueror, you can simply copy three pipes "`|||`" and a question or command, and it will generate a response into the clipboard that you can paste directly into any text field. 14 | 15 | Clipboard Conqueror works out of the box with 16 | - [KoboldCPP](http://www.github.com/LostRuins/koboldcpp/), 17 | - [Text Generation WebUI](https://github.com/oobabooga/text-generation-webui), 18 | - [Ollama](https://www.ollama.com), 19 | - [LMStudio](https://lmstudio.ai)*closed source 20 | 21 | These local inference servers are generally considered secure and reliable, and can be invoked simply like "|||ollama|" or "|||tgw|". They do not require an internet connection or any sensitive data to be sent on the network. Clipboard Conqueror is very configurable and should be compatible with any inference server. 22 | 23 | CC works online via: 24 | - [Novita AI](https://novita.ai/model-api/product/llm-api?utm_source=ClipboardConqueror&utm_medium=github_readme&utm_campaign=link) 25 | - [01.AI](https://platform.01.ai/) 26 | - [Anyscale](https://www.anyscale.com/) 27 | - [Claude](https://docs.anthropic.com/en/docs/about-claude/models) 28 | - [Fireworks](https://fireworks.ai/) 29 | - [Groq](https://groq.com/) 30 | - [Openrouter](https://openrouter.ai/) 31 | - [Together](https://www.together.ai/) 32 | - [OpenAI's API](https://platform.openai.com/docs/overview) 33 | 34 | Put your key into the appropriate [endpoint.key](https://github.com/aseichter2007/ClipboardConqueror/blob/6e5a09613a27007ae2cf928fceeee3b7c77a2143/setup.js#L310) in setup.js. 35 | 36 | ## Key Features 37 | 38 | ### Summon any text right where you need it. 
39 | 40 | - **Control Every Part of LLM Prompts:** Manage and customize every aspect of your AI without leaving your current workspace. 41 | 42 | - **Quickly Prototype Prompts:** Test and refine your prompts quickly for deployment in production environments. 43 | 44 | - **Locally Run Models:** Trust your data with locally run models that do not phone home or report any metrics. 45 | 46 | - **Supports Multiple Inference Endpoints:** Flexibly interface with your favorite inference engines. 47 | 48 | - **No-code Multi-hop Inference Framework** Prepare powerful chain-of-inference prompts for superior responses or prototype workflows for use in multi-step agentic frameworks. 49 | 50 | - **Desktop Platforms** Clipboard Conqueror is designed to work seamlessly across multiple desktop platforms including Windows, macOS, and Linux. It has been gently tested to ensure stability and compatibility. 51 | 52 | - **OpenAI Compatible** local inference engines are not strictly required to use Clipboard Conqueror, it works against ChatGPT API and other internet inference sources. 53 | 54 | - CC provides a whole toolbox of predefined and targeted assistants, ready to work for you. 55 | 56 | - Save system prompts on the fly to draft, define, translate, think, review, or tell jokes. 57 | 58 | 59 | 60 | ## Getting Started 61 | 62 | 1. [Installing Clipboard Conqueror](Readme-Install.md) 63 | 2. [Choosing a Model](Readme-Choosing-A-Model.md) 64 | 3. [Basic Use](Readme-How-To-Use-CC.md) 65 | 4. [Bookmarklets](https://aseichter2007.github.io/ClipboardConqueror/bookmarklets.html) 66 | 4. [Prompt Reference](Readme-Prompt-Reference.md) 67 | 5. [Prompt Formatting](Readme-Prompt-Formatting.md) 68 | 6. [API Switching](Readme-Endpoints.md) 69 | 7. [Chain of Inference](Readme-Inference-Chaining.md) 70 | 8. [Setup.js](Readme-Setup.md) 71 | 72 | 73 | ## Privacy Policy 74 | Clipboard Conqueror does not collect any metrics or send any data behind the scenes. 
When used with local LLMs, no data leaves the local machine. When used with online APIs, please refer to the privacy policy of your chosen host. 75 | 76 | ## Additional Resources 77 | 78 | - [Prompt Tower](https://github.com/backnotprop/prompt-tower) 79 | 80 | This VSCode extension is excellent for building and arranging code prompts, and works seamlessly with CC, just add an invocation to the top of the prompt tower and click Copy Prompt. 81 | 82 | - [Toggle Design Mode Bookmarklet](https://www.reddit.com/r/bookmarklets/comments/d8pqe2/toggle_design_mode/) 83 | 84 | Morph the web on demand as you browse. 85 | - [Effects of Basic Sampling Parameters](https://artefact2.github.io/llm-sampling/index.xhtml) 86 | - [Unofficial Kobold Guide and Model Suggestions](https://docs.google.com/document/d/1I1r-NGIxo3Gt0gfOeqJkTxTQEgPQKmy7UZR5wh_aZlY/edit?pli=1) 87 | - [AMD GPU Resources](https://llm-tracker.info/howto/AMD-GPUs) 88 | - [The Hitchhiker's Guide to LLMs](https://osanseviero.github.io/hackerllama/blog/posts/hitchhiker_guide/) 89 | - [LLMs: How Do They Work?](https://bbycroft.net/llm) 90 | - [OpenHermes 2.5 Mistral Prompting Ideas](https://www.reddit.com/r/LocalLLaMA/comments/18j59g1/you_are_a_helpful_ai_assistant/) 91 | - [Llama 3 Quant Loss](https://github.com/matt-c1/llama-3-quant-comparison) 92 | - [Visual Guide to Quantization](https://newsletter.maartengrootendorst.com/p/a-visual-guide-to-quantization) 93 | 94 | ## Videos 95 | Quick Start Jam, Recommended: 96 | 97 | [![YouTube Video](https://i.ytimg.com/vi/fcM8dDtVTYQ/hqdefault.jpg)](https://youtu.be/fcM8dDtVTYQ) 98 | 99 | Quick demo: 100 | 101 | [![YouTube Video](https://i.ytimg.com/vi/n8tQJlne3qs/hqdefault.jpg)](https://youtu.be/n8tQJlne3qs) 102 | 103 | Using Clipboard Conqueror to mutate content, and Installation: 104 | 105 | 106 | [![Youtube Video](https://i.ytimg.com/vi/NqnpBi0MHsc/hqdefault.jpg)](https://youtu.be/NqnpBi0MHsc) 107 | 108 | 109 | - *note in this video I mention context leaking with Context 
## Acknowledgements
Then each word is added to that vector as it is chosen in turn one at a time, with some randomness to get better speech flavor, until the next probable token is a stop token or max length is exceeded.
155 | 156 | You can go find the right data and paste the text at an LLM and it can use that data, but no LLM should be trusted implicitly, just as a first resort, right here aboard the Clipboard Conqueror. 157 | 158 | 159 | ## License 160 | 161 | This project is licensed under the MIT License. See the [LICENSE](LICENSE) file for details. 162 | 163 | ## Disclaimer 164 | 165 | Please use Clipboard Conqueror responsibly and respect copyright and laws in your country while generating content. Misuse of this tool might lead to unintended consequences and breaches of privacy or intellectual property rights. I hold no responsibility for the data that passes through this tool on any system. 166 | 167 | [Install](Readme-Install.md), [Choosing a Model](Readme-Choosing-A-Model.md), [Basic Use](Readme-How-To-Use-CC.md), [Bookmarklets](https://aseichter2007.github.io/ClipboardConqueror/bookmarklets.html), [Prompt Reference](Readme-Prompt-Reference.md), [Prompt Formatting](Readme-Prompt-Formatting.md), [API Switching](Readme-Endpoints.md), [Chaining Inference](Readme-Inference-Chaining.md), [Setup.js](Readme-Setup.md) 168 | 169 | I solemnly promise that this application and at least one compatible backend will function in the absence of internet connectivity. 170 | One of my design inspirations for this application is to spread LLM models to as many computers as possible. I want to ensure at least one intact system is recovered by future archaeologists, a time capsule of culture, science, and data. We should be sending intelligent boxes to deep space too. Our knowledge and posterity must not go to waste. 
// ResponseEngine post-processes LLM responses: it detects embedded
// function-call markers, dispatches the payload between them to handler
// methods on this class, and optionally strips formatting tokens
// (ChatML-style <|...|> tags) before the text is returned.
class ResponseEngine {
    /**
     * @param {object} appSettings - app configuration; `removeFormatting`
     *        controls whether matching tags are stripped from responses.
     * @param {Array<{start: string, end: string, func: string}>} functionList -
     *        marker pairs and the name of the method on this class to invoke
     *        with the text found between `start` and `end`.
     * @param {RegExp} remove - global pattern matching tags to strip.
     */
    constructor(
        appSettings,
        functionList = [{ start: "<|call|>", end: "<|/endCall|>", func: "call" }],
        remove = /<\|[^|]*\|>/g
    ) {
        this.appSettings = appSettings;
        this.functionList = functionList;
        this.remove = remove;
    }
    // Default handler for the <|call|> marker: just logs the payload.
    call(text) {
        console.log("Function called text: " + text);
    }
    // Scan `text` for every registered marker pair and dispatch matches.
    callFunctions(text) {
        this.functionList.forEach((fun) => {
            // Wrap the handler in an arrow so it keeps `this` bound to the
            // engine; passing this[fun.func] directly would lose `this`.
            this.functionCheckExecute(text, fun.start, fun.end, (payload) =>
                this[fun.func](payload)
            );
        });
    }
    /**
     * If `text` contains `functionCall` followed by `terminator`, invoke
     * `callback` with the text between the two markers.
     * Fix: the payload lives AFTER the start marker (splitText[1]); the
     * original inspected splitText[0] (text before the marker) and used the
     * always-true check `length >= 1`, so the callback could never fire for
     * a normal "<start>payload<end>" response.
     */
    functionCheckExecute(text, functionCall, terminator, callback) {
        if (text.includes(functionCall)) {
            const splitText = text.split(functionCall);
            if (splitText.length >= 2 && splitText[1].includes(terminator)) {
                const outText = splitText[1].split(terminator);
                callback(outText[0]);
            }
        }
    }
    // Add your functions as methods in this file.
    // Strip all tags matched by `this.remove` (ChatML-style <|...|> tokens).
    removeChatML(text) {
        return text.replace(this.remove, '');
    }
    // Main entry point: dispatch any function calls found in `text`, then
    // strip formatting tags when the app is configured to do so.
    // (Name keeps its historical spelling; callers depend on it. Removed an
    // unused `totals` local and a dead commented-out terminator-trim block.)
    recieveMessageFindTerminatorsAndTrim(text) {
        this.callFunctions(text);
        if (this.appSettings.removeFormatting) {
            text = this.removeChatML(text);
        }
        return text;
    }
}
/**
 * Merge `setting` into the JSON settings file at `target` and write it back.
 * If `target` cannot be read (e.g. first run, file missing or malformed),
 * fall back to writing the provided `identities` defaults instead.
 *
 * @param {object} setting - settings to merge over the saved ones (its keys win).
 * @param {object} identities - default content written when `target` is unreadable.
 * @param {string} target - path of the JSON file to update.
 * @param {Function} notify - user-facing notifier (message, detail).
 * @param {object} fs - injected Node fs module (eases testing).
 */
function saveSettings(setting, identities, target, notify, fs) {
    try {
        // Read the current contents of `target` on every save. The previous
        // require('./0prompts.json') both pointed at a hard-coded path that
        // could differ from `target` and cached its first result, so later
        // saves merged against stale data.
        const savedSettings = JSON.parse(fs.readFileSync(target, 'utf8'));
        const merged = { ...savedSettings, ...setting };
        fs.writeFileSync(target, JSON.stringify(merged));
        // Log the actual path instead of a hard-coded "0identies.json".
        console.log(`${target} updated.`);
    } catch (err) {
        try {
            notify("file doesn't exist, writing:", err.message);
            console.log(`file doesn't exist, writing: ${err.message}`);
            fs.writeFileSync(target, JSON.stringify(identities));
            console.log(`${target} written.`);
        } catch (error) {
            // Best-effort: report the failure but never throw back to the caller.
            console.log("error writing " + target + " : " + error);
        }
    }
}
#!/bin/bash
# Update Clipboard Conqueror in place (Linux/macOS), then refresh npm deps.

# Run from the directory this script lives in.
cd "${0%/*}"

# Detect git via PATH lookup; testing /usr/bin/git alone misses installs in
# /usr/local/bin or /opt/homebrew/bin (common on macOS).
if ! command -v git > /dev/null 2>&1; then
    echo "Git is not installed on this system. Skipping update."
    echo "If you installed with a zip file, you will need to download the new zip and install it manually."
else
    git pull --rebase --autostash
    if [[ $? != 0 ]]; then
        echo "There were errors while updating. Please download the latest version manually."
    fi
fi

npm install
read -n 1 -s -r -p "Press any key to continue..."
cd ..
exit
# NOTE(review): removed unreachable lines that followed `exit`; they chmod'd
# and re-invoked a wrongly named script (y-linux-mac-update.sh) and could
# never run.
@echo off
REM Update Clipboard Conqueror (Windows): pull latest sources, reinstall deps.
pushd %~dp0
git --version > nul 2>&1
if %errorlevel% neq 0 (
echo Git is not installed on this system. Skipping update.
echo If you installed with a zip file, you will need to download the new zip and install it manually.
) else (
call git pull --rebase --autostash
REM Use "if errorlevel 1" here: %errorlevel% inside a parenthesized block is
REM expanded when the block is parsed, so it would test the value from
REM before git pull ran, not its result.
if errorlevel 1 (
REM in case there is still something wrong
echo There were errors while updating. Please download the latest version manually.
)
)
call npm install
pause
popd