├── .dockerignore ├── assets └── screenshot.png ├── src ├── commands │ ├── commands.js │ └── text2img.js ├── index.js └── bot.js ├── .gitignore ├── Dockerfile ├── docker-compose.yml ├── package.json ├── .github └── ISSUE_TEMPLATE │ ├── feature_request.md │ └── bug_report.md ├── Makefile ├── .eslintrc.json ├── eslint.config.mjs ├── .env.example └── README.md /.dockerignore: -------------------------------------------------------------------------------- 1 | package-lock.json 2 | pnpm-lock.yaml 3 | pnpm-lock.yml 4 | node_modules/ 5 | .env 6 | -------------------------------------------------------------------------------- /assets/screenshot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/238SAMIxD/discord-ai-bot/HEAD/assets/screenshot.png -------------------------------------------------------------------------------- /src/commands/commands.js: -------------------------------------------------------------------------------- 1 | import text2img from "./text2img.js"; 2 | 3 | export default [text2img]; 4 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | package-lock.json 2 | pnpm-lock.yaml 3 | pnpm-lock.yml 4 | node_modules/ 5 | .env 6 | .vscode/ 7 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:20 2 | 3 | COPY . . 4 | RUN npm i --omit=dev --no-package-lock 5 | USER node 6 | 7 | CMD ["node","./src/index.js"] -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | services: 4 | bot: 5 | build: . 6 | env_file: .env 7 | environment: 8 | - OLLAMA=http://host.docker.internal:11434 9 | restart: unless-stopped 10 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "discord-ai-bot", 3 | "main": "src/index.js", 4 | "scripts": { 5 | "start": "node src/index.js" 6 | }, 7 | "dependencies": { 8 | "axios": "^1.6.3", 9 | "discord.js": "^14.14.1", 10 | "dotenv": "^16.3.1", 11 | "meklog": "^1.0.2" 12 | }, 13 | "type": "module", 14 | "devDependencies": { 15 | "@eslint/js": "^9.7.0", 16 | "@types/eslint__js": "^8.42.3", 17 | "eslint": "^9.7.0", 18 | "globals": "^15.8.0" 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: enhancement 6 | assignees: '' 7 | 8 | --- 9 | 10 | 11 | 12 | **Describe the feature** 13 | A clear and concise description of the feature. 14 | 15 | **Why do you think this feature should be implemented?** 16 | What does it solve? 17 | 18 | **Additional context** 19 | Add any other context or screenshots about the feature request here. 
20 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Report unintended behaviour or an error 4 | title: '' 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | 11 | 12 | **Describe the bug** 13 | A clear and concise description of what the bug is. 14 | 15 | **To Reproduce** 16 | Steps to reproduce the behavior: 17 | 1. Go to '...' 18 | 2. Click on '....' 19 | 3. Scroll down to '....' 20 | 4. See error 21 | 22 | **Expected behavior** 23 | A clear and concise description of what you expected to happen. 24 | 25 | **Screenshots** 26 | If applicable, add screenshots to help explain your problem. 27 | 28 | **Device** 29 | - OS: (e.g Windows/Linux) 30 | - Repository commit ID (run `git rev-parse HEAD`) 31 | - Ollama version (run `ollama -v`) 32 | 33 | **Additional context** 34 | Add any other context about the problem here. 35 | -------------------------------------------------------------------------------- /src/index.js: -------------------------------------------------------------------------------- 1 | import { ShardingManager, Events } from "discord.js"; 2 | import path from "node:path"; 3 | import { fileURLToPath } from "node:url"; 4 | import { Logger, LogLevel } from "meklog"; 5 | import dotenv from "dotenv"; 6 | 7 | dotenv.config(); 8 | 9 | const production = process.env.NODE_ENV == "prod" || process.env.NODE_ENV == "production"; 10 | const log = new Logger(production, "Shard Manager"); 11 | 12 | log(LogLevel.Info, "Loading"); 13 | 14 | const filePath = path.join(path.dirname(fileURLToPath(import.meta.url)), "bot.js"); 15 | const manager = new ShardingManager(filePath, { token: process.env.TOKEN }); 16 | 17 | manager.on("shardCreate", async shard => { 18 | const shardLog = new Logger(production, `Shard #${shard.id}`); 19 | 20 | shardLog(LogLevel.Info, "Created shard"); 21 | 22 | shard.once(Events.ClientReady, async () => { 23 | shard.send({ shardID: shard.id, logger: shardLog.data }); 24 | 25 | shardLog(LogLevel.Info, "Shard ready"); 26 | }); 27 | }); 28 | 29 | manager.spawn(); 30 | 31 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # As long as you have Make running on your machine you should be able to use this file. 2 | # make runs a given command (e.g. make compose-up) 3 | # Command-names are given by starting a line without a tab and followed by a colon (i.e.':'). 4 | # what the command runs is the line below the colon and that line must start with a tab of size 4. 5 | # Running make without a command after it will run the first command in the file. 6 | 7 | # starts the discord-ai-bot 8 | compose-up: 9 | $(MAKE) setup_env && docker compose -p discord-ai up 10 | 11 | # Stops docker compose without removing the containers from the system. 12 | compose-stop: 13 | docker compose -p discord-ai stop 14 | 15 | # Stops docker compose and removes the containers from the system 16 | compose-down: 17 | docker compose -p discord-ai down 18 | 19 | # Run the local node project with make and without docker 20 | local: 21 | $(MAKE) setup_env && npm i && node ./src/index.js 22 | 23 | # This copies the .env.example (source) file to the .env (destination) file location 24 | # The -n or no clobber means it will not overwrite the .env file if it already exists. 
25 | # The || : basically ignores the error code of the previous command and always succeeds. 26 | setup_env: 27 | cp -n ./.env.example ./.env 2>/dev/null || : 28 | -------------------------------------------------------------------------------- /src/commands/text2img.js: -------------------------------------------------------------------------------- 1 | import { SlashCommandBuilder } from "discord.js"; 2 | 3 | const text2img = new SlashCommandBuilder() 4 | .setName("text2img") 5 | .setDescription("Convert text to image") 6 | .addStringOption((option) => 7 | option.setName("prompt").setDescription("Text to convert").setRequired(true) 8 | ) 9 | .addNumberOption((option) => 10 | option 11 | .setName("width") 12 | .setDescription("Width of the image") 13 | .setRequired(false) 14 | .setMinValue(128) 15 | .setMaxValue(1024) 16 | ) 17 | .addNumberOption((option) => 18 | option 19 | .setName("height") 20 | .setDescription("Height of the image") 21 | .setRequired(false) 22 | .setMinValue(128) 23 | .setMaxValue(1024) 24 | ) 25 | .addNumberOption((option) => 26 | option 27 | .setName("steps") 28 | .setDescription("Number of steps") 29 | .setRequired(false) 30 | .setMinValue(5) 31 | .setMaxValue(20) 32 | ) 33 | .addNumberOption((option) => 34 | option 35 | .setName("batch_count") 36 | .setDescription("Batch count") 37 | .setRequired(false) 38 | .setMinValue(1) 39 | .setMaxValue(4) 40 | ) 41 | .addNumberOption((option) => 42 | option 43 | .setName("batch_size") 44 | .setDescription("Batch size") 45 | .setRequired(false) 46 | .setMinValue(1) 47 | .setMaxValue(5) 48 | ) 49 | .addBooleanOption((option) => 50 | option 51 | .setName("enhance_prompt") 52 | .setDescription("Enhance prompt") 53 | .setRequired(false) 54 | ); 55 | 56 | export default text2img; 57 | -------------------------------------------------------------------------------- /.eslintrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "eslint:recommended", 3 | "env": { 4 | "node": true, 5 | "es6": true 6 | }, 7 | "parserOptions": { 8 | "ecmaVersion": 2021, 9 | "sourceType": "module" 10 | }, 11 | "rules": { 12 | "arrow-spacing": ["warn", { "before": true, "after": true }], 13 | "brace-style": ["error", "1tbs", { "allowSingleLine": true }], 14 | "comma-dangle": ["error", "never"], 15 | "comma-spacing": "error", 16 | "comma-style": "error", 17 | "curly": ["error", "multi-line", "consistent"], 18 | "dot-location": ["error", "property"], 19 | "handle-callback-err": "off", 20 | "indent": ["error", "tab", { "SwitchCase": 1 }], 21 | "keyword-spacing": "error", 22 | "max-nested-callbacks": ["error", { "max": 4 }], 23 | "max-statements-per-line": ["error", { "max": 2 }], 24 | "no-console": "off", 25 | "no-empty": "warn", 26 | "no-empty-function": "error", 27 | "no-floating-decimal": "error", 28 | "no-lonely-if": "error", 29 | "no-multi-spaces": "error", 30 | "no-multiple-empty-lines": ["error", { "max": 2, "maxEOF": 1, "maxBOF": 0 }], 31 | "no-shadow": ["error", { "allow": ["err", "resolve", "reject"] }], 32 | "no-trailing-spaces": ["error"], 33 | "no-var": "error", 34 | "object-curly-spacing": ["error", "always"], 35 | "prefer-const": "error", 36 | "quotes": ["error", "double"], 37 | "semi": ["error", "always"], 38 | "space-before-blocks": "error", 39 | "space-before-function-paren": ["error", { 40 | "anonymous": "never", 41 | "named": "never", 42 | "asyncArrow": "always" 43 | }], 44 | "space-in-parens": "error", 45 | "space-infix-ops": "error", 46 | "space-unary-ops": "error", 47 | 
"spaced-comment": "error", 48 | "yoda": "error", 49 | "default-case-last": "error", 50 | "switch-colon-spacing": ["error", {"after": true, "before": false}] 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /eslint.config.mjs: -------------------------------------------------------------------------------- 1 | import globals from "globals"; 2 | import path from "node:path"; 3 | import { fileURLToPath } from "node:url"; 4 | import js from "@eslint/js"; 5 | import { FlatCompat } from "@eslint/eslintrc"; 6 | 7 | const __filename = fileURLToPath(import.meta.url); 8 | const __dirname = path.dirname(__filename); 9 | const compat = new FlatCompat({ 10 | baseDirectory: __dirname, 11 | recommendedConfig: js.configs.recommended, 12 | allConfig: js.configs.all 13 | }); 14 | 15 | export default [...compat.extends("eslint:recommended"), { 16 | languageOptions: { 17 | globals: { 18 | ...globals.node 19 | }, 20 | 21 | ecmaVersion: 2021, 22 | sourceType: "module" 23 | }, 24 | 25 | rules: { 26 | "arrow-spacing": ["warn", { 27 | before: true, 28 | after: true 29 | }], 30 | 31 | "brace-style": ["error", "1tbs", { 32 | allowSingleLine: true 33 | }], 34 | 35 | "comma-dangle": ["error", "never"], 36 | "comma-spacing": "error", 37 | "comma-style": "error", 38 | curly: ["error", "multi-line", "consistent"], 39 | "dot-location": ["error", "property"], 40 | "handle-callback-err": "off", 41 | 42 | indent: ["error", "tab", { 43 | SwitchCase: 1 44 | }], 45 | 46 | "keyword-spacing": "error", 47 | 48 | "max-nested-callbacks": ["error", { 49 | max: 4 50 | }], 51 | 52 | "max-statements-per-line": ["error", { 53 | max: 2 54 | }], 55 | 56 | "no-console": "off", 57 | "no-empty": "warn", 58 | "no-empty-function": "error", 59 | "no-floating-decimal": "error", 60 | "no-lonely-if": "error", 61 | "no-multi-spaces": "error", 62 | 63 | "no-multiple-empty-lines": ["error", { 64 | max: 2, 65 | maxEOF: 1, 66 | maxBOF: 0 67 | }], 68 | 69 | "no-shadow": ["error", { 70 | allow: ["err", "resolve", "reject"] 71 | }], 72 | 73 | "no-trailing-spaces": ["error"], 74 | "no-var": "error", 75 | "object-curly-spacing": ["error", "always"], 76 | "prefer-const": "error", 77 | quotes: ["error", "double"], 78 | semi: ["error", "always"], 79 | "space-before-blocks": "error", 80 | 81 | "space-before-function-paren": ["error", { 82 | anonymous: "never", 83 | named: "never", 84 | asyncArrow: "always" 85 | }], 86 | 87 | "space-in-parens": "error", 88 | "space-infix-ops": "error", 89 | "space-unary-ops": "error", 90 | "spaced-comment": "error", 91 | yoda: "error", 92 | "default-case-last": "error", 93 | 94 | "switch-colon-spacing": ["error", { 95 | after: true, 96 | before: false 97 | }] 98 | } 99 | }]; -------------------------------------------------------------------------------- /.env.example: -------------------------------------------------------------------------------- 1 | # Discord bot token 2 | TOKEN= 3 | 4 | # What language model to use, orca is one of the lower-end models that doesn't require as much computer power as llama2 5 | MODEL=orca 6 | 7 | # Ollama URL 8 | OLLAMA=http://localhost:11434 9 | 10 | # Stable diffusion URL 11 | STABLE_DIFFUSION=http://localhost:7860 12 | 13 | # What Discord channels to enable it in (by ID) 14 | CHANNELS=123456789,987654321 15 | 16 | # System message that the language model can understand 17 | # Feel free to change this 18 | SYSTEM="The current date and time is . 19 | 20 | Basic markdown is supported. 
21 | Bold: **bold text here** 22 | Italics: _italic text here_ 23 | Underlined: __underlined text here__ 24 | Strikethrough: ~~strikethrough text here~~ 25 | Spoiler: ||spoiler text here|| 26 | Block quotes: Start the line with a > followed by a space, e.g 27 | > Hello there 28 | 29 | Inline code blocks are supported by surrounding text in backticks, e.g `print('Hello');`, block code is supported by surrounding text in three backticks, e.g ```print('Hello');```. 30 | Surround code that is produced in code blocks. Use a code block with three backticks if the code has multiple lines, otherwise use an inline code block with one backtick. 31 | 32 | Links are supported by wrapping the text in square brackets and the link in parenthesis, e.g [Example](https://example.com) 33 | 34 | Lists are supported by starting the line with a dash followed by a space, e.g - List 35 | Numbered lists are supported by starting the line with a number followed by a dot and a space, e.g 1. List. 36 | Images, links, tables, LaTeX, and anything else is not supported. 37 | 38 | If you need to use the symbols >, |, _, *, ~, @, #, :, `, put a backslash before them to escape them. 39 | 40 | If the user is chatting casually, your responses should be only a few sentences, unless they are asking for help or a question. 41 | Don't use unicode emoji unless needed." 42 | 43 | # Use the system message above? (true/false) 44 | USE_SYSTEM=true 45 | 46 | # Use the model's system message? (true/false) If both are specified, model system message will be first 47 | USE_MODEL_SYSTEM=true 48 | 49 | # Require users to mention the bot to interact with it? (true/false) 50 | REQUIRES_MENTION=true 51 | 52 | # Whether to show a message at the start of a conversation 53 | SHOW_START_OF_CONVERSATION=true 54 | 55 | # Whether to use a random Ollama server or use the first available one 56 | RANDOM_SERVER=false 57 | 58 | # Whether to add a message before the first prompt of the conversation 59 | INITIAL_PROMPT="" 60 | USE_INITIAL_PROMPT=false 61 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |
2 | # Discord AI Bot
3 | > Repository is now in maintenance mode - the project is being rewritten in TypeScript on the `typescript` branch
4 | Discord bot to interact with Ollama and AUTOMATIC1111 Stable Diffusion as a chatbot
5 | 
6 | ![Discord chat with the bot](assets/screenshot.png)
7 | 
8 | 9 | The project started thanks to [mekb](https://github.com/mekb-turtle). 10 | 11 | ### Set-up instructions 12 | 13 | 1. Install [Node.js](https://nodejs.org) (if you have a package manager, use that instead to install this) 14 | - Make sure to install Node.js 16.11 or newer (required by discord.js v14; the Dockerfile in this repository uses Node 20) 15 | 2. Install [Ollama](https://github.com/jmorganca/ollama) (ditto) 16 | 3. Pull (download) a model, e.g. `ollama pull orca` or `ollama pull llama2` 17 | 4. Start Ollama by running `ollama serve` 18 | 5. [Create a Discord bot](https://discord.com/developers/applications) 19 | - Under Application » Bot 20 | - Enable Message Content Intent 21 | - Enable Server Members Intent (for replacing user mentions with the username) 22 | 6. Invite the bot to a server 23 | 1. Go to Application » OAuth2 » URL Generator 24 | 2. Enable `bot` 25 | 3. Enable Send Messages, Read Messages/View Channels, and Read Message History 26 | 4. Under Generated URL, click Copy and paste the URL in your browser 27 | 7. Rename `.env.example` to `.env` and edit the `.env` file 28 | - You can get the token from Application » Bot » Token, **never share this with anyone** 29 | - Make sure to change the model if you aren't using `orca` 30 | - Ollama URL can be kept the same unless you have changed the port 31 | - You can use multiple Ollama servers at the same time by separating the URLs with commas 32 | - Set the channels to the channel IDs, comma separated 33 | 1. In Discord, go to User Settings » Advanced, and enable Developer Mode 34 | 2. Right click on a channel you want to use, and click Copy Channel ID 35 | - You can edit the system message the bot uses, or disable it entirely 36 | 8. Install the required dependencies with `npm i` 37 | 9. Start the bot with `npm start` 38 | 10. You can interact with the bot by @mentioning it with your message 39 | 11. Install the [AUTOMATIC1111 Stable Diffusion web UI](https://github.com/AUTOMATIC1111/stable-diffusion-webui) 40 | 12. Start the web UI with `./webui.sh --api --listen` so the bot can reach its API 41 | 42 | ### Set-up instructions with Docker 43 | 44 | 1. Install [Docker](https://docs.docker.com/get-docker/) 45 | - Docker Engine must be at least compatible with version 3 of Compose (Docker Engine 1.13.0+) 46 | 2. Repeat steps 2-7 from the other setup instructions 47 | 3. Start the bot with `make compose-up` if you have Make installed 48 | - Otherwise, try `docker compose -p discord-ai up` instead 49 | 4.
You can interact with the bot by @mentioning it with your message 50 | -------------------------------------------------------------------------------- /src/bot.js: -------------------------------------------------------------------------------- 1 | import { 2 | Client, 3 | Events, 4 | GatewayIntentBits, 5 | MessageType, 6 | Partials, 7 | REST, 8 | Routes 9 | } from "discord.js"; 10 | import { Logger, LogLevel } from "meklog"; 11 | import dotenv from "dotenv"; 12 | import axios from "axios"; 13 | import commands from "./commands/commands.js"; 14 | 15 | dotenv.config(); 16 | 17 | const model = process.env.MODEL; 18 | const servers = process.env.OLLAMA.split(",").map(url => ({ url: new URL(url), available: true })); 19 | const stableDiffusionServers = process.env.STABLE_DIFFUSION.split(",").map(url => ({ url: new URL(url), available: true })); 20 | const channels = process.env.CHANNELS.split(","); 21 | 22 | if (servers.length == 0) { 23 | throw new Error("No servers available"); 24 | } 25 | 26 | let log; 27 | process.on("message", data => { 28 | if (data.shardID) client.shardID = data.shardID; 29 | if (data.logger) log = new Logger(data.logger); 30 | }); 31 | 32 | const logError = (error) => { 33 | if (error.response) { 34 | let str = `Error ${error.response.status} ${error.response.statusText}: ${error.request.method} ${error.request.path}`; 35 | if (error.response.data?.error) { 36 | str += ": " + error.response.data.error; 37 | } 38 | log(LogLevel.Error, str); 39 | } else { 40 | log(LogLevel.Error, error); 41 | } 42 | }; 43 | 44 | function shuffleArray(array) { 45 | for (let i = array.length - 1; i > 0; i--) { 46 | const j = Math.floor(Math.random() * (i + 1)); 47 | [array[i], array[j]] = [array[j], array[i]]; 48 | } 49 | return array; 50 | } 51 | 52 | async function makeRequest(path, method, data) { 53 | while (servers.filter(server => server.available).length == 0) { 54 | // wait until a server is available 55 | await new Promise(res => setTimeout(res, 1000)); 56 | } 57 | 58 | let error = null; 59 | // randomly loop through the servers available, don't shuffle the actual array because we want to be notified of any updates 60 | let order = new Array(servers.length).fill().map((_, i) => i); 61 | if (randomServer) order = shuffleArray(order); 62 | for (const j in order) { 63 | if (!order.hasOwnProperty(j)) continue; 64 | const i = order[j]; 65 | // try one until it succeeds 66 | try { 67 | // make a request to ollama 68 | if (!servers[i].available) continue; 69 | const url = new URL(servers[i].url); // don't modify the original URL 70 | 71 | servers[i].available = false; 72 | 73 | if (path.startsWith("/")) path = path.substring(1); 74 | if (!url.pathname.endsWith("/")) url.pathname += "/"; // safety 75 | url.pathname += path; 76 | log(LogLevel.Debug, `Making request to ${url}`); 77 | const result = await axios({ 78 | method, url, data, 79 | responseType: "text" 80 | }); 81 | servers[i].available = true; 82 | return result.data; 83 | } catch (err) { 84 | servers[i].available = true; 85 | error = err; 86 | logError(error); 87 | } 88 | } 89 | if (!error) { 90 | throw new Error("No servers available"); 91 | } 92 | throw error; 93 | } 94 | 95 | async function makeStableDiffusionRequest(path, method, data) { 96 | while (stableDiffusionServers.filter(server => server.available).length == 0) { 97 | // wait until a server is available 98 | await new Promise(res => setTimeout(res, 1000)); 99 | } 100 | 101 | let error = null; 102 | // randomly loop through the servers available, don't shuffle the 
actual array because we want to be notified of any updates 103 | let order = new Array(stableDiffusionServers.length).fill().map((_, i) => i); 104 | if (randomServer) order = shuffleArray(order); 105 | for (const j in order) { 106 | if (!order.hasOwnProperty(j)) continue; 107 | const i = order[j]; 108 | // try one until it succeeds 109 | try { 110 | // make a request to stable diffusion 111 | if (!stableDiffusionServers[i].available) continue; 112 | const url = new URL(stableDiffusionServers[i].url); // don't modify the original URL 113 | 114 | stableDiffusionServers[i].available = false; 115 | 116 | if (path.startsWith("/")) path = path.substring(1); 117 | if (!url.pathname.endsWith("/")) url.pathname += "/"; // safety 118 | url.pathname += path; 119 | log(LogLevel.Debug, `Making stable diffusion request to ${url}`); 120 | const result = await axios({ 121 | method, url, data 122 | }); 123 | stableDiffusionServers[i].available = true; 124 | return result.data; 125 | } catch (err) { 126 | stableDiffusionServers[i].available = true; 127 | error = err; 128 | logError(error); 129 | } 130 | } 131 | if (!error) { 132 | throw new Error("No servers available"); 133 | } 134 | throw error; 135 | } 136 | 137 | const client = new Client({ 138 | intents: [ 139 | GatewayIntentBits.Guilds, 140 | GatewayIntentBits.GuildMessages, 141 | GatewayIntentBits.GuildMembers, 142 | GatewayIntentBits.DirectMessages, 143 | GatewayIntentBits.MessageContent 144 | ], 145 | allowedMentions: { users: [], roles: [], repliedUser: false }, 146 | partials: [ 147 | Partials.Channel 148 | ] 149 | }); 150 | 151 | const rest = new REST({ version: "10" }).setToken(process.env.TOKEN); 152 | 153 | client.once(Events.ClientReady, async () => { 154 | await client.guilds.fetch(); 155 | client.user.setPresence({ activities: [], status: "online" }); 156 | await rest.put(Routes.applicationCommands(client.user.id), { 157 | body: commands 158 | }); 159 | 160 | log(LogLevel.Info, "Successfully reloaded application slash (/) commands."); 161 | }); 162 | 163 | const messages = {}; 164 | 165 | // split text so it fits in a Discord message 166 | function splitText(str, length) { 167 | // trim matches different characters to \s 168 | str = str 169 | .replace(/\r\n/g, "\n").replace(/\r/g, "\n") 170 | .replace(/^\s+|\s+$/g, ""); 171 | const segments = []; 172 | let segment = ""; 173 | let word, suffix; 174 | function appendSegment() { 175 | segment = segment.replace(/^\s+|\s+$/g, ""); 176 | if (segment.length > 0) { 177 | segments.push(segment); 178 | segment = ""; 179 | } 180 | } 181 | // match a word 182 | while ((word = str.match(/^[^\s]*(?:\s+|$)/)) != null) { 183 | suffix = ""; 184 | word = word[0]; 185 | if (word.length == 0) break; 186 | if (segment.length + word.length > length) { 187 | // prioritise splitting by newlines over other whitespaces 188 | if (segment.includes("\n")) { 189 | // append up all but last paragraph 190 | const beforeParagraph = segment.match(/^.*\n/s); 191 | if (beforeParagraph != null) { 192 | const lastParagraph = segment.substring(beforeParagraph[0].length, segment.length); 193 | segment = beforeParagraph[0]; 194 | appendSegment(); 195 | segment = lastParagraph; 196 | continue; 197 | } 198 | } 199 | appendSegment(); 200 | // if word is larger than the split length 201 | if (word.length > length) { 202 | word = word.substring(0, length); 203 | if (length > 1 && word.match(/^[^\s]+$/)) { 204 | // try to hyphenate word 205 | word = word.substring(0, word.length - 1); 206 | suffix = "-"; 207 | } 208 | } 209 | } 210 | 
str = str.substring(word.length, str.length); 211 | segment += word + suffix; 212 | } 213 | appendSegment(); 214 | return segments; 215 | } 216 | 217 | function getBoolean(str) { 218 | return !!str && str != "false" && str != "no" && str != "off" && str != "0"; 219 | } 220 | 221 | function parseJSONMessage(str) { 222 | return str.split(/[\r\n]+/g).map(line => { 223 | const result = JSON.parse(`"${line}"`); 224 | if (typeof result !== "string") throw new Error("Invalid syntax in .env file"); 225 | return result; 226 | }).join("\n"); 227 | } 228 | 229 | function parseEnvString(str) { 230 | return typeof str === "string" ? 231 | parseJSONMessage(str).replace(/<date>/gi, new Date().toUTCString()) : null; 232 | } 233 | 234 | const customSystemMessage = parseEnvString(process.env.SYSTEM); 235 | const useCustomSystemMessage = getBoolean(process.env.USE_SYSTEM) && !!customSystemMessage; 236 | const useModelSystemMessage = getBoolean(process.env.USE_MODEL_SYSTEM); 237 | const showStartOfConversation = getBoolean(process.env.SHOW_START_OF_CONVERSATION); 238 | const randomServer = getBoolean(process.env.RANDOM_SERVER); 239 | let modelInfo = null; 240 | const initialPrompt = parseEnvString(process.env.INITIAL_PROMPT); 241 | const useInitialPrompt = getBoolean(process.env.USE_INITIAL_PROMPT) && !!initialPrompt; 242 | 243 | const requiresMention = getBoolean(process.env.REQUIRES_MENTION); 244 | 245 | async function replySplitMessage(replyMessage, content) { 246 | const responseMessages = splitText(content, 2000).map(text => ({ content: text })); 247 | 248 | const replyMessages = []; 249 | for (let i = 0; i < responseMessages.length; ++i) { 250 | if (i == 0) { 251 | replyMessages.push(await replyMessage.reply(responseMessages[i])); 252 | } else { 253 | replyMessages.push(await replyMessage.channel.send(responseMessages[i])); 254 | } 255 | } 256 | return replyMessages; 257 | } 258 | 259 | client.on(Events.MessageCreate, async message => { 260 | let typing = false; 261 | try { 262 | await message.fetch(); 263 | 264 | // return if not in the right channel 265 | const channelID = message.channel.id; 266 | if (message.guild && !channels.includes(channelID)) return; 267 | 268 | // return if user is a bot, or non-default message 269 | if (!message.author.id) return; 270 | if (message.author.bot || message.author.id == client.user.id) return; 271 | 272 | const botRole = message.guild?.members?.me?.roles?.botRole; 273 | const myMention = new RegExp(`<@((!?${client.user.id}${botRole ?
`)|(&${botRole.id}` : ""}))>`, "g"); // RegExp to match a mention for the bot 274 | 275 | if (typeof message.content !== "string" || message.content.length == 0) { 276 | return; 277 | } 278 | 279 | let context = null; 280 | if (message.type == MessageType.Reply) { 281 | const reply = await message.fetchReference(); 282 | if (!reply) return; 283 | if (reply.author.id != client.user.id) return; 284 | if (messages[channelID] == null) return; 285 | if ((context = messages[channelID][reply.id]) == null) return; 286 | } else if (message.type != MessageType.Default) { 287 | return; 288 | } 289 | 290 | // fetch info about the model like the template and system message 291 | if (modelInfo == null) { 292 | modelInfo = (await makeRequest("/api/show", "post", { 293 | name: model 294 | })); 295 | if (typeof modelInfo === "string") modelInfo = JSON.parse(modelInfo); 296 | if (typeof modelInfo !== "object") throw "failed to fetch model information"; 297 | } 298 | 299 | const systemMessages = []; 300 | 301 | if (useModelSystemMessage && modelInfo.system) { 302 | systemMessages.push(modelInfo.system); 303 | } 304 | 305 | if (useCustomSystemMessage) { 306 | systemMessages.push(customSystemMessage); 307 | } 308 | 309 | // join them together 310 | const systemMessage = systemMessages.join("\n\n"); 311 | 312 | // deal with commands first before passing to LLM 313 | let userInput = message.content 314 | .replace(new RegExp("^s*" + myMention.source, ""), "").trim(); 315 | 316 | // may change this to slash commands in the future 317 | // i'm using regular text commands currently because the bot interacts with text content anyway 318 | if (userInput.startsWith(".")) { 319 | const args = userInput.substring(1).split(/\s+/g); 320 | const cmd = args.shift(); 321 | switch (cmd) { 322 | case "reset": 323 | case "clear": 324 | if (messages[channelID] != null) { 325 | // reset conversation 326 | const cleared = messages[channelID].amount; 327 | 328 | // clear 329 | delete messages[channelID]; 330 | 331 | if (cleared > 0) { 332 | await message.reply({ content: `Cleared conversation of ${cleared} messages` }); 333 | break; 334 | } 335 | } 336 | await message.reply({ content: "No messages to clear" }); 337 | break; 338 | case "help": 339 | case "?": 340 | case "h": 341 | await message.reply({ content: "Commands:\n- `.reset` `.clear`\n- `.help` `.?` `.h`\n- `.ping`\n- `.model`\n- `.system`" }); 342 | break; 343 | case "model": 344 | await message.reply({ 345 | content: `Current model: ${model}` 346 | }); 347 | break; 348 | case "system": 349 | await replySplitMessage(message, `System message:\n\n${systemMessage}`); 350 | break; 351 | case "ping": 352 | // get ms difference 353 | try { 354 | const beforeTime = Date.now(); 355 | const reply = await message.reply({ content: "Ping" }); 356 | const afterTime = Date.now(); 357 | const difference = afterTime - beforeTime; 358 | await reply.edit({ content: `Ping: ${difference}ms` }); 359 | } catch (error) { 360 | logError(error); 361 | await message.reply({ content: "Error, please check the console" }); 362 | } 363 | break; 364 | case "": 365 | break; 366 | default: 367 | await message.reply({ content: "Unknown command, type `.help` for a list of commands" }); 368 | break; 369 | } 370 | return; 371 | } 372 | 373 | if (message.type == MessageType.Default && (requiresMention && message.guild && !message.content.match(myMention))) return; 374 | 375 | if (message.guild) { 376 | await message.guild.channels.fetch(); 377 | await message.guild.members.fetch(); 378 | } 379 | 380 | userInput 
= userInput 381 | .replace(myMention, "") 382 | .replace(/<#([0-9]+)>/g, (_, id) => { 383 | if (message.guild) { 384 | const chn = message.guild.channels.cache.get(id); 385 | if (chn) return `#${chn.name}`; 386 | } 387 | return "#unknown-channel"; 388 | }) 389 | .replace(/<@!?([0-9]+)>/g, (_, id) => { 390 | if (id == message.author.id) return message.author.username; 391 | if (message.guild) { 392 | const mem = message.guild.members.cache.get(id); 393 | if (mem) return `@${mem.user.username}`; 394 | } 395 | return "@unknown-user"; 396 | }) 397 | .replace(/<:([a-zA-Z0-9_]+):([0-9]+)>/g, (_, name) => { 398 | return `emoji:${name}:`; 399 | }) 400 | .trim(); 401 | 402 | if (userInput.length == 0) return; 403 | 404 | // Process text files if attached 405 | if (message.attachments.size > 0) { 406 | const textAttachments = Array.from(message.attachments, ([, value]) => value).filter(att => att.contentType.startsWith("text")); 407 | if (textAttachments.length > 0) { 408 | try { 409 | await Promise.all(textAttachments.map(async (att, i) => { 410 | const response = await axios.get(att.url); 411 | userInput += `\n${i + 1}. File - ${att.name}:\n${response.data}`; 412 | })); 413 | } catch (error) { 414 | log(LogLevel.Error, `Failed to download text files: ${error}`); 415 | await message.reply({ content: "Failed to download text files" }); 416 | return; // Stop processing if file download fails 417 | } 418 | } 419 | } 420 | 421 | // create conversation 422 | if (messages[channelID] == null) { 423 | messages[channelID] = { amount: 0, last: null }; 424 | } 425 | 426 | // log user's message 427 | log(LogLevel.Debug, `${message.guild ? `#${message.channel.name}` : "DMs"} - ${message.author.username}: ${userInput}`); 428 | 429 | // start typing 430 | typing = true; 431 | await message.channel.sendTyping(); 432 | let typingInterval = setInterval(async () => { 433 | try { 434 | await message.channel.sendTyping(); 435 | } catch (error) { 436 | logError(error); 437 | if (typingInterval != null) { 438 | clearInterval(typingInterval); 439 | } 440 | typingInterval = null; 441 | } 442 | }, 7000); 443 | 444 | let response; 445 | try { 446 | // context if the message is not a reply 447 | if (context == null) { 448 | context = messages[channelID].last; 449 | } 450 | 451 | if (useInitialPrompt && messages[channelID].amount == 0) { 452 | userInput = `${initialPrompt}\n\n${userInput}`; 453 | log(LogLevel.Debug, "Adding initial prompt to message"); 454 | } 455 | 456 | // make request to model 457 | response = (await makeRequest("/api/generate", "post", { 458 | model: model, 459 | prompt: userInput, 460 | system: systemMessage, 461 | context 462 | })); 463 | 464 | if (typeof response != "string") { 465 | log(LogLevel.Debug, response); 466 | throw new TypeError("response is not a string, this may be an error with ollama"); 467 | } 468 | 469 | response = response.split("\n").filter(e => !!e).map(e => { 470 | return JSON.parse(e); 471 | }); 472 | } catch (error) { 473 | if (typingInterval != null) { 474 | clearInterval(typingInterval); 475 | } 476 | typingInterval = null; 477 | throw error; 478 | } 479 | 480 | if (typingInterval != null) { 481 | clearInterval(typingInterval); 482 | } 483 | typingInterval = null; 484 | 485 | let responseText = response.map(e => e.response).filter(e => e != null).join("").trim(); 486 | if (responseText.length == 0) { 487 | responseText = "(No response)"; 488 | } 489 | 490 | log(LogLevel.Debug, `Response: ${responseText}`); 491 | 492 | const prefix = showStartOfConversation && 
messages[channelID].amount == 0 ? 493 | "> This is the beginning of the conversation, type `.help` for help.\n\n" : ""; 494 | 495 | // reply (will automatically stop typing) 496 | const replyMessageIDs = (await replySplitMessage(message, `${prefix}${responseText}`)).map(msg => msg.id); 497 | 498 | // add response to conversation 499 | context = response.filter(e => e.done && e.context)[0].context; 500 | for (let i = 0; i < replyMessageIDs.length; ++i) { 501 | messages[channelID][replyMessageIDs[i]] = context; 502 | } 503 | messages[channelID].last = context; 504 | ++messages[channelID].amount; 505 | } catch (error) { 506 | if (typing) { 507 | try { 508 | // return error 509 | await message.reply({ content: "Error, please check the console" }); 510 | } catch (ignored) { 511 | logError(ignored); 512 | } 513 | } 514 | logError(error); 515 | } 516 | }); 517 | 518 | client.on(Events.InteractionCreate, async (interaction) => { 519 | if (!interaction.isCommand()) return; 520 | 521 | const { commandName, options } = interaction; 522 | 523 | switch (commandName) { 524 | case "text2img": 525 | try { 526 | const prompt = options.getString("prompt"); 527 | const width = options.getNumber("width") || 256; 528 | const height = options.getNumber("height") || 256; 529 | const steps = options.getNumber("steps") || 10; 530 | const batch_count = options.getNumber("batch_count") || 1; 531 | const batch_size = options.getNumber("batch_size") || 1; 532 | const enhance_prompt = (options.getBoolean("enhance_prompt") && true) ? "yes" : "no"; 533 | 534 | await interaction.deferReply(); 535 | const stableDiffusionResponse = await makeStableDiffusionRequest( 536 | "/sdapi/v1/txt2img", 537 | "post", 538 | { 539 | prompt, 540 | width, 541 | height, 542 | steps, 543 | num_inference_steps: steps, 544 | batch_count, 545 | batch_size, 546 | enhance_prompt 547 | } 548 | ); 549 | const images = stableDiffusionResponse.images.map((image) => 550 | Buffer.from(image, "base64") 551 | ); 552 | await interaction.editReply({ 553 | content: `Here are images from prompt \`${prompt}\``, 554 | files: images 555 | }); 556 | } catch (error) { 557 | logError(error); 558 | await interaction.editReply({ 559 | content: "Error, please check the console" 560 | }); 561 | } 562 | break; 563 | } 564 | }); 565 | 566 | client.login(process.env.TOKEN); 567 | --------------------------------------------------------------------------------
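Editor's note: the heart of `src/bot.js` is the parsing of Ollama's `/api/generate` output, which arrives as newline-delimited JSON: each line carries a partial `response`, and the final line (`"done": true`) carries the `context` array the bot stores per channel to continue the conversation. Below is a minimal standalone sketch of that flow, assuming a local Ollama server as configured in `.env.example`; the `generate` helper and the two example prompts are illustrative only and are not part of the repository.

```js
// Minimal sketch (assumed, not part of the repository): mirrors how src/bot.js
// handles the newline-delimited JSON streamed by Ollama's /api/generate.
// Each line is a JSON object with a partial "response"; the final object
// ("done": true) also carries the "context" array used to continue the chat.
import axios from "axios";

const OLLAMA = process.env.OLLAMA || "http://localhost:11434"; // default from .env.example
const MODEL = process.env.MODEL || "orca";

// illustrative helper, not an export of the bot
async function generate(prompt, context = null) {
	const result = await axios({
		method: "post",
		url: `${OLLAMA}/api/generate`,
		data: { model: MODEL, prompt, ...(context ? { context } : {}) },
		responseType: "text" // keep the raw NDJSON, like makeRequest in bot.js
	});

	// one JSON object per non-empty line
	const chunks = result.data.split("\n").filter(line => !!line).map(line => JSON.parse(line));

	// join the partial responses into the full reply text
	const text = chunks.map(chunk => chunk.response).filter(part => part != null).join("").trim();

	// the "done" chunk holds the context to send back on the next turn
	const done = chunks.find(chunk => chunk.done && chunk.context);
	return { text, context: done ? done.context : null };
}

// example usage: two turns sharing one conversation context
const first = await generate("Hello!");
console.log(first.text);
const second = await generate("What did I just say?", first.context);
console.log(second.text);
```

Saving the returned `context` and passing it back on the next request is the same mechanism `messages[channelID]` uses in the bot to keep per-channel conversations going.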