├── .github
│   └── workflows
│       └── compile.yml
├── .gitignore
├── .idx
│   ├── .gitignore
│   └── dev.nix
├── README.md
├── bots
│   ├── chatgpt.ts
│   ├── claude_3.ts
│   ├── gemini.ts
│   ├── gpt_4.ts
│   ├── gpt_4o.ts
│   ├── llama_3.ts
│   └── mixtral.ts
├── changelog.md
├── client.ts
├── lib
│   ├── eval.ts
│   ├── eval_worker.js
│   ├── importLLMFile.ts
│   └── sanity_check.ts
├── main.d.ts
├── main.ts
├── prototype
│   ├── README.md
│   └── fixGPTCode.ts
├── slashcode.ts
└── vdb.ts
/.github/workflows/compile.yml:
--------------------------------------------------------------------------------
1 | name: Deno
2 |
3 | on:
4 | push:
5 | branches: ["main"]
6 | pull_request:
7 | branches: ["main"]
8 |
9 | permissions:
10 | contents: read
11 |
12 | jobs:
13 | test:
14 | runs-on: ubuntu-latest
15 |
16 | steps:
17 | - name: Setup repo
18 | uses: actions/checkout@v3
19 |
20 | - name: Setup Deno
21 | uses: denoland/setup-deno@v1
22 | with:
23 | deno-version: vx.x.x
24 |
25 | - name: Compile LLM Bot (Windows)
26 | run: deno compile --no-check --allow-all --unstable --target x86_64-pc-windows-msvc --output compiled/llm-bot.exe main.ts
27 |
28 | - name: Compile LLM Bot (Linux, x86)
29 | run: deno compile --no-check --allow-all --unstable --target x86_64-unknown-linux-gnu --output compiled/llm-bot.x86_64 main.ts
30 |
31 | - name: Upload LLM Bot Artifact (Windows)
32 | uses: actions/upload-artifact@v3.1.2
33 | with:
34 | name: llm-bot-windows
35 | path: compiled/llm-bot.exe
36 |
37 | - name: Upload LLM Bot Artifact (Linux)
38 | uses: actions/upload-artifact@v3.1.2
39 | with:
40 | name: llm-bot-linux
41 | path: compiled/llm-bot.x86_64
42 |
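43 | # For reference, a local equivalent of the compile steps above (a sketch reusing
44 | # the same flags; pick a --target from above, or omit it to build for the host):
45 | #   deno compile --no-check --allow-all --unstable --output compiled/llm-bot main.ts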
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | /.env
2 | /.vscode
3 | db.*
4 | bots/rpgpt_*
5 | llmbot.*
--------------------------------------------------------------------------------
/.idx/.gitignore:
--------------------------------------------------------------------------------
1 |
2 | gc/
3 |
--------------------------------------------------------------------------------
/.idx/dev.nix:
--------------------------------------------------------------------------------
1 |
2 | { pkgs, ... }: {
3 |
4 | # Which nixpkgs channel to use.
5 | channel = "stable-23.11"; # or "unstable"
6 |
7 | # Use https://search.nixos.org/packages to find packages
8 | packages = [
9 | pkgs.deno
10 | ];
11 |
12 | }
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # llm-bot
2 |
3 | A Discord bot for interacting with LLMs.
4 |
5 | ## Installation tutorial
6 |
7 | 1. Create a .env file.
8 | 2. Populate the file with these values:
9 |
10 | ```
11 | DISCORD_TOKEN=""
12 | OPENAI_API_KEY=""
13 | BING_COOKIE=""
14 | GEMINI_API_KEY=""
15 | APP_ID=""
16 | SUPABASE_URL=""
17 | SUPABASE_SERVICE_ROLE_KEY=""
18 | ```
19 |
20 | 3. Run the bot using Deno:
21 | `deno run --allow-net --allow-read --allow-env main.ts`
22 | 3.1. Or, run a compiled binary directly, how you normally would. Check GitHub
23 | Actions for the files you'll need.
24 |
--------------------------------------------------------------------------------
/bots/chatgpt.ts:
--------------------------------------------------------------------------------
1 | import * as types from "../main.d.ts";
2 |
3 | const tools: types.Tool[] = [{
4 | type: "function",
5 | function: {
6 | name: "sanitycheck",
7 | description:
8 | "Returns true, as a sanity check to make sure function support is OK. If this fails, something's fucked.",
9 | parameters: {
10 | type: "object",
11 | properties: {
12 | useless: {
13 | type: "string",
14 | description:
15 | "You can put whatever here, it's not going to do anything.",
16 | },
17 | },
18 | required: ["useless"],
19 | },
20 | },
21 | }];
22 |
23 | export const information: types.information = {
24 | llmFileVersion: "1.0",
25 | env: ["OPENAI_API_KEY"],
26 | functions: true,
27 | functionsData: tools,
28 | multiModal: false,
29 | callbackSupport: true,
30 | streamingSupport: false,
31 | id: "gpt3.5",
32 | name: "GPT-3.5",
33 | description:
34 | "OpenAI's original flagship chat model, a low cost and quick to use model for general purposes.",
35 | highCostLLM: false,
36 | };
37 |
38 | async function doTools(
39 | res: types.Response,
40 | callback?:
41 | | ((information: types.callbackData, complete: boolean) => void)
42 | | null,
43 | requirements?: types.Requirements,
44 | ): Promise<types.Response> {
45 | if (res.choices[0].finish_reason !== "tool_calls") {
46 | throw "What The Shit?";
47 | }
48 |
49 | const toolCalls = res.choices[0].message.tool_calls!;
50 |
51 | // What if they happen to use it later?
52 | // deno-lint-ignore require-await
53 | const promises = toolCalls.map(async (tool) => {
54 | if (tool.function.name === "sanitycheck") {
55 | return {
56 | role: "tool",
57 | content: "true",
58 | tool_call_id: tool.id,
59 | };
60 | } else {
61 | return {
62 | role: "tool",
63 | content: "Unknown tool or not implemented",
64 | tool_call_id: tool.id,
65 |
66 | };
67 | }
68 | });
69 |
70 | // Use Promise.all to wait for all promises to resolve
71 | const results = await Promise.all(promises);
72 |
73 | results.forEach((result) => {
74 | res.messages.push(result);
75 | });
76 |
77 | const newres = await send(null, res.messages, callback, requirements);
78 |
79 | console.log(newres);
80 |
81 | return newres;
82 | }
83 |
84 | export async function send(
85 | prompt: string | null,
86 | messages: types.Message[],
87 | callback?:
88 | | ((information: types.callbackData, complete: boolean) => void)
89 | | null,
90 | requirements?: types.Requirements,
91 | ): Promise<types.Response> {
92 | if (!requirements?.env?.OPENAI_API_KEY) {
93 | throw new DOMException("env.OPENAI_API_KEY", "NotFoundError");
94 | }
95 |
96 | if (requirements.streaming) {
97 | throw new DOMException("streaming", "NotSupportedError");
98 | }
99 |
100 | if (messages.length === 0) {
101 | messages.push({
102 | role: "system",
103 | content: "You are ChatGPT, an LLM by OpenAI.",
104 | });
105 | }
106 |
107 | if (prompt) {
108 | messages.push({
109 | role: "user",
110 | content: prompt,
111 | });
112 | }
113 |
114 | const res = await fetch("https://api.openai.com/v1/chat/completions", {
115 | method: "POST",
116 | headers: {
117 | "Content-Type": "application/json",
118 | Authorization: `Bearer ${requirements?.env.OPENAI_API_KEY}`,
119 | },
120 | body: JSON.stringify({
121 | model: "gpt-3.5-turbo",
122 | messages: messages,
123 | tools,
124 | }),
125 | });
126 |
127 | let resp: types.Response = await res.json();
128 |
129 | if (resp.error) {
130 | throw new DOMException(resp.error.message, "ExecutionError");
131 | }
132 |
133 | messages.push(resp.choices[0].message);
134 |
135 | resp.messages = messages;
136 |
137 | if (resp.choices[0].finish_reason === "tool_calls") {
138 | if (callback) {
139 | callback({
140 | toolCalls: resp.choices[0].message.tool_calls,
141 | data: resp.choices[0].message.content,
142 | }, false);
143 | }
144 | resp = await doTools(resp, null, requirements);
145 | resp.choices[0].message.content = resp.choices[0].message.content as string;
146 | }
147 |
148 | if (callback) callback({ data: resp.choices[0].message.content }, true);
149 |
150 | return resp;
151 | }
152 |
--------------------------------------------------------------------------------
/bots/claude_3.ts:
--------------------------------------------------------------------------------
1 | import * as types from "../main.d.ts";
2 |
3 | type ClaudeResponse = {
4 | id: string;
5 | type: string;
6 | role: string;
7 | content: {
8 | type: string;
9 | text: string;
10 | }[];
11 | model: string;
12 | stop_reason: string | null;
13 | stop_sequence: null;
14 | usage: {
15 | input_tokens: number;
16 | output_tokens: number;
17 | };
18 | };
19 |
20 | export const information: types.information = {
21 | llmFileVersion: "1.0",
22 | env: ["ANTHROPIC_API_KEY"],
23 | functions: false,
24 | multiModal: false, // Listen man I just want to get this to work. I will fix the images later. SHUT UP. IT FUNCTIONS.
25 | callbackSupport: true,
26 | streamingSupport: false,
27 | id: "c3opus",
28 | name: "Claude 3 Opus",
29 | description: "A very strong LLM for answering questions.",
30 | highCostLLM: true,
31 | };
32 |
33 | // const db = await Deno.openKv("./db.sqlite")
34 |
35 | /*function arrayBufferToBase64(buffer: ArrayBuffer) {
36 | let binary = "";
37 | const bytes = new Uint8Array(buffer);
38 | const len = bytes.byteLength;
39 | for (let i = 0; i < len; i++) {
40 | binary += String.fromCharCode(bytes[i]);
41 | }
42 | return btoa(binary); // Base64 encode the binary string
43 | }*/
44 |
45 | export async function send(
46 | prompt: string | null,
47 | messages: types.Message[],
48 | callback?:
49 | | ((information: types.callbackData, complete: boolean) => void)
50 | | null,
51 | requirements?: types.Requirements,
52 | ): Promise<types.Response> {
53 | // here we go
54 |
55 | if (!requirements?.env?.ANTHROPIC_API_KEY) {
56 | throw new DOMException("env.OPENAI_API_KEY", "NotFoundError");
57 | }
58 |
59 | if (requirements.streaming) {
60 | throw new DOMException("streaming", "NotSupportedError");
61 | }
62 |
63 | const sysprompt =
64 | "You are Claude 3 Opus, an LLM by Anthropic. You are running through a Discord bot named LLM Bot, by Eris.";
65 |
66 | /*const contentarr: types.AnthropicContentPart[] = [];
67 |
68 | if (requirements.images) {
69 | // Use map to create an array of promises
70 | const imagePromises = requirements.images.map(async (image_url) => {
71 | const img = await fetch(image_url);
72 |
73 | const imageType = img.headers.get("Content-Type");
74 |
75 | if (!imageType || !imageType.startsWith("image/")) {
76 | throw new Error(
77 | "Whoever's managing the images for this did an absolutely terrible job.",
78 | );
79 | } else {
80 | const imageArrayBuffer = await img.arrayBuffer();
81 | console.log(imageArrayBuffer);
82 | const imageData = arrayBufferToBase64(imageArrayBuffer);
83 | contentarr.push({
84 | type: "image",
85 | source: {
86 | type: "base64",
87 | media_type: imageType,
88 | data: imageData,
89 | },
90 | });
91 | console.log(imageType);
92 | }
93 | });
94 |
95 | // Wait for all the image processing to complete
96 | await Promise.all(imagePromises);
97 | }
98 |
99 | // After all images are processed, push the text content
100 | contentarr.push({
101 | type: "text",
102 | text: prompt!,
103 | });
104 |
105 | let msg: ClaudeMessage = {
106 | role: "user",
107 | content: contentarr,
108 | };
109 |
110 | messages.push(msg);*/
111 |
112 | messages.push({
113 | role: "user",
114 | content: prompt,
115 | });
116 |
117 | const res = await fetch("https://api.anthropic.com/v1/messages", {
118 | method: "POST",
119 | headers: {
120 | "Content-Type": "application/json",
121 | "x-api-key": `${requirements.env.ANTHROPIC_API_KEY}`,
122 | "anthropic-version": "2023-06-01",
123 | },
124 | body: JSON.stringify({
125 | model: "claude-3-opus-20240229",
126 | max_tokens: 4096,
127 | messages: messages,
128 | system: sysprompt,
129 | }),
130 | });
131 |
132 | const resp: ClaudeResponse = await res.json();
133 |
134 | messages.push({ role: "assistant", content: resp.content[0].text });
135 |
136 | if (callback) callback({ data: resp.content[0].text }, true);
137 |
138 | return {
139 | id: resp.id,
140 | choices: [{
141 | finish_reason: resp.stop_reason,
142 | message: {
143 | content: resp.content[0].text,
144 | role: "assistant",
145 | },
146 | }],
147 | messages: messages,
148 | created: Date.now(),
149 | model: resp.model,
150 | object: "chat.completion",
151 | };
152 | }
153 |
--------------------------------------------------------------------------------
/bots/gemini.ts:
--------------------------------------------------------------------------------
1 | import * as types from "../main.d.ts";
2 |
3 | if (!Deno.env.get("OPENAI_API_KEY")) {
4 | console.warn("No OpenAI API key provided! ChatGPT will be unavailable.");
5 | isEnabled = false;
6 | }
7 |
8 | // const db = await Deno.openKv("./db.sqlite")
9 |
10 | async function processGeminiMessages(
11 | messages: types.Message[],
12 | ): Promise<(types.GeminiContentPartImage | types.GeminiContentPartText)[]> {
13 | const geminiFormattedMessages = [];
14 |
15 | for (const message of messages) {
16 | if (message.role === "system") {
17 | geminiFormattedMessages.push({ text: `System: ${message.content}` });
18 | } else if (message.role === "assistant") {
19 | geminiFormattedMessages.push({ text: `Model: ${message.content}` });
20 | } else if (message.role === "user") {
21 | geminiFormattedMessages.push({ text: `User: ${message.content}` });
22 | } else if (message.role === "image") {
23 | message.content = message.content as string;
24 | const imageData = await getImageData(message.content);
25 |
26 | geminiFormattedMessages.push({
27 | inlineData: {
28 | mimeType: imageData?.contentType!,
29 | data: imageData?.base64ImageData!,
30 | },
31 | });
32 | }
33 | }
34 |
35 | // All asynchronous operations inside the loop have completed here
36 | return geminiFormattedMessages;
37 | }
38 |
39 | async function getImageData(url: string) {
40 | try {
41 | const response = await fetch(url);
42 |
43 | const contentType = response.headers.get("Content-Type");
44 |
45 | const blob = await response.blob();
46 |
47 | const reader = new FileReader();
48 | reader.readAsDataURL(blob);
49 |
50 | // Step 5: Wait for the FileReader to finish
51 | await new Promise((resolve, reject) => {
52 | reader.onloadend = resolve;
53 | reader.onerror = reject;
54 | });
55 |
56 | // Step 6: Get the base64-encoded image data
57 |
58 | const resultString = reader.result as string;
59 |
60 | const base64ImageData = resultString.split(",")[1];
61 |
62 | return { contentType, base64ImageData };
63 | } catch (error) {
64 | console.error("Error:", error);
65 | }
66 | }
67 |
68 | export async function send(
69 | messages: types.Message[],
70 | prompt: string | null,
71 | images: string[],
72 | ): Promise<{ resp: types.geminiResponse; messages: types.Message[] }> {
73 | // here we go
74 |
75 | if (!isEnabled) {
76 | throw "not_enabled";
77 | }
78 |
79 | if (messages.length === 0) {
80 | messages.push({
81 | role: "system",
82 | content:
83 | "You are Gemini Pro, a model created by Google. You are being proxied through Discord by a bot made by Eris. You can use basic markdown to structure your responses. No need to prefix your messages with emojis or what you are (Assisant/Model), we do that automatically. (Seriously, don't do that!!!) Everything that follows is messages, NOT examples.",
84 | });
85 | }
86 |
87 | if (prompt !== null) {
88 | messages.push({
89 | role: "user",
90 | content: prompt,
91 | });
92 | }
93 |
94 | images.forEach((image) => {
95 | messages.push({
96 | role: "image",
97 | content: image,
98 | });
99 | });
100 |
101 | let useImageModel = false;
102 |
103 | console.log(useImageModel);
104 |
105 | // Check if any object has the specified property set to the target value
106 | for (let i = 0; i < messages.length; i++) {
107 | if (messages[i].role === "image") {
108 | useImageModel = true;
109 | break; // Stop the loop since we found a match
110 | }
111 | }
112 |
113 | let geminiFormattedMessages:
114 | (types.GeminiContentPartText | types.GeminiContentPartImage)[] = [];
115 |
116 | geminiFormattedMessages = await processGeminiMessages(messages);
117 |
118 | // Gemini message system is a motherfucker and I hate it but we gotta deal with it. Messages look like this:
119 |
120 | /*
121 | {
122 | 'text': 'User: Whats 9 + 10?'
123 | },
124 | {
125 | 'text': 'Model: 21'
126 | }
127 | */
128 |
129 | const res = await fetch(
130 | `https://generativelanguage.googleapis.com/v1beta/models/${
131 | useImageModel === true ? "gemini-1.0-pro-vision" : "gemini-1.0-pro"
132 | }:generateContent?key=${Deno.env.get("GEMINI_API_KEY")}`,
133 | {
134 | method: "POST",
135 | headers: {
136 | "Content-Type": "application/json",
137 | },
138 | body: JSON.stringify({
139 | "contents": [
140 | {
141 | "parts": geminiFormattedMessages,
142 | },
143 | ],
144 | "generationConfig": {
145 | "temperature": 0.9,
146 | "topK": 1,
147 | "topP": 1,
148 | "maxOutputTokens": 2048,
149 | "stopSequences": [],
150 | },
151 | "safetySettings": [
152 | {
153 | "category": "HARM_CATEGORY_HARASSMENT",
154 | "threshold": "BLOCK_MEDIUM_AND_ABOVE",
155 | },
156 | {
157 | "category": "HARM_CATEGORY_HATE_SPEECH",
158 | "threshold": "BLOCK_MEDIUM_AND_ABOVE",
159 | },
160 | {
161 | "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
162 | "threshold": "BLOCK_MEDIUM_AND_ABOVE",
163 | },
164 | {
165 | "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
166 | "threshold": "BLOCK_MEDIUM_AND_ABOVE",
167 | },
168 | ],
169 | }),
170 | },
171 | );
172 |
173 | const resp: types.geminiResponse = await res.json();
174 |
175 | messages.push({
176 | role: "assistant",
177 | content: resp.candidates[0].content.parts[0].text,
178 | });
179 |
180 | return {
181 | resp,
182 | messages,
183 | };
184 | }
185 |
--------------------------------------------------------------------------------
/bots/gpt_4.ts:
--------------------------------------------------------------------------------
1 | import * as types from "../main.d.ts";
2 |
3 | const tools: types.Tool[] = [{
4 | type: "function",
5 | function: {
6 | name: "sanitycheck",
7 | description:
8 | "Returns true, as a sanity check to make sure function support is OK. If this fails, something's fucked.",
9 | parameters: {
10 | type: "object",
11 | properties: {
12 | useless: {
13 | type: "string",
14 | description:
15 | "You can put whatever here, it's not going to do anything.",
16 | },
17 | },
18 | required: ["useless"],
19 | },
20 | },
21 | }];
22 |
23 | export const information: types.information = {
24 | llmFileVersion: "1.0",
25 | env: ["OPENAI_API_KEY"],
26 | functions: true,
27 | functionsData: tools,
28 | multiModal: true,
29 | callbackSupport: true,
30 | streamingSupport: false,
31 | id: "gpt4",
32 | name: "GPT-4",
33 | description: "OpenAI's most powerful model, with vision support.",
34 | highCostLLM: true,
35 | };
36 |
37 | async function doTools(
38 | res: types.Response,
39 | callback?:
40 | | ((information: types.callbackData, complete: boolean) => void)
41 | | null,
42 | requirements?: types.Requirements,
43 | ): Promise<types.Response> {
44 | if (res.choices[0].finish_reason !== "tool_calls") {
45 | throw "What The Shit?";
46 | }
47 |
48 | const toolCalls = res.choices[0].message.tool_calls!;
49 |
50 | // What if they happen to use it later?
51 | // deno-lint-ignore require-await
52 | const promises = toolCalls.map(async (tool) => {
53 | if (tool.function.name === "sanitycheck") {
54 | return {
55 | role: "tool",
56 | content: "true",
57 | tool_call_id: tool.id,
58 | };
59 | } else {
60 | return {
61 | role: "tool",
62 | content: "Unknown tool or not implemented",
63 | tool_call_id: tool.id,
64 |
65 | };
66 | }
67 | });
68 |
69 | // Use Promise.all to wait for all promises to resolve
70 | const results = await Promise.all(promises);
71 |
72 | results.forEach((result) => {
73 | res.messages.push(result);
74 | });
75 |
76 | const newres = await send(null, res.messages, callback, requirements);
77 |
78 | return newres;
79 | }
80 |
81 | export async function send(
82 | prompt: string | null,
83 | messages: types.Message[],
84 | callback?:
85 | | ((information: types.callbackData, complete: boolean) => void)
86 | | null,
87 | requirements?: types.Requirements,
88 | ): Promise<types.Response> {
89 | if (!requirements?.env?.OPENAI_API_KEY) {
90 | throw new DOMException("env.OPENAI_API_KEY", "NotFoundError");
91 | }
92 |
93 | if (requirements.streaming) {
94 | throw new DOMException("streaming", "NotSupportedError");
95 | }
96 |
97 | if (messages.length === 0) {
98 | messages.push({
99 | role: "system",
100 | content: [{
101 | type: "text",
102 | text: "You are GPT-4, an LLM by OpenAI.",
103 | }],
104 | });
105 | }
106 |
107 | const prompt_data: types.ContentPart[] = [];
108 |
109 | if (prompt !== null) {
110 | prompt_data.push({
111 | type: "text",
112 | text: prompt,
113 | });
114 | }
115 |
116 | requirements.images?.forEach((image_url) => {
117 | prompt_data.push({
118 | type: "image_url",
119 | image_url: {
120 | url: image_url,
121 | },
122 | });
123 | });
124 |
125 |
126 | if (prompt_data.length !== 0) {
127 | messages.push({
128 | role: "user",
129 | content: prompt_data,
130 | });
131 |
132 |
133 | }
134 |
135 | // Make sure all existing images are valid (if any)
136 | // Things to check:
137 | // - Image still exists (discord's image storage is volatile)
138 | // - Image is png, jpg, gif or webp
139 |
140 | messages = await Promise.all(messages.map(async (message) => {
141 | message.content = message.content as types.ContentPart[];
142 |
143 | if (!Array.isArray(message.content)) {
144 | return message
145 | }
146 |
147 | if (message.content.length === 0) {
148 | return message
149 | }
150 |
151 | message.content = await Promise.all(message.content.map(async (part): Promise<types.ContentPart> => {
152 | if (part.type === "image_url") {
153 | const imageData = await fetch(part.image_url.url)
154 |
155 | if (imageData.status !== 200) {
156 | message.role === "system"
157 | return {
158 | type: "text",
159 | text: "There was an image here, but it no longer exists. Ask the user to resend it if you need it."
160 | }
161 | }
162 |
163 | if (!imageData.headers.get("content-type")?.startsWith("image/")) {
164 | message.role === "system"
165 | return {
166 | type: "text",
167 | text: "There was an image here, but it was not a valid image. Ask the user to resend it if you need it."
168 | }
169 | }
170 |
171 | if (
172 | !part.image_url.url.endsWith(".png") &&
173 | !part.image_url.url.endsWith(".jpg") &&
174 | !part.image_url.url.endsWith(".gif") &&
175 | !part.image_url.url.endsWith(".webp"))
176 | {
177 | message.role === "system"
178 | return {
179 | type: "text",
180 | text: `There was an image here, but it was not a valid image format! Ask the user to resend in either .png, .jpg, .gif or .webp format. The image format was ${imageData.headers.get("content-type")?.split("/")[1]}.`
181 | }
182 | }
183 |
184 | return part
185 | } else {
186 | return part
187 | }
188 | }))
189 |
190 | return message
191 | }))
192 |
193 | const res = await fetch("https://api.openai.com/v1/chat/completions", {
194 | method: "POST",
195 | headers: {
196 | "Content-Type": "application/json",
197 | Authorization: `Bearer ${requirements?.env.OPENAI_API_KEY}`,
198 | },
199 | body: JSON.stringify({
200 | model: "gpt-4-turbo",
201 | messages: messages,
202 | tools,
203 | }),
204 | });
205 |
206 | let resp: types.Response = await res.json();
207 |
208 | if (resp.error) {
209 | throw new DOMException(resp.error.message, "ExecutionError");
210 | }
211 |
212 | messages.push(resp.choices[0].message);
213 |
214 | resp.messages = messages;
215 |
216 | if (resp.choices[0].finish_reason === "tool_calls") {
217 | if (callback) {
218 | callback({
219 | toolCalls: resp.choices[0].message.tool_calls,
220 | data: resp.choices[0].message.content,
221 | }, false);
222 | }
223 | resp = await doTools(resp, null, requirements);
224 | resp.choices[0].message.content = resp.choices[0].message.content as string;
225 | }
226 |
227 | if (callback) callback({ data: resp.choices[0].message.content }, true);
228 |
229 | return resp;
230 | }
231 |
--------------------------------------------------------------------------------
/bots/gpt_4o.ts:
--------------------------------------------------------------------------------
1 | import * as types from "../main.d.ts";
2 |
3 | const tools: types.Tool[] = [{
4 | type: "function",
5 | function: {
6 | name: "sanitycheck",
7 | description:
8 | "Returns true, as a sanity check to make sure function support is OK. If this fails, something's fucked.",
9 | parameters: {
10 | type: "object",
11 | properties: {
12 | useless: {
13 | type: "string",
14 | description:
15 | "You can put whatever here, it's not going to do anything.",
16 | },
17 | },
18 | required: ["useless"],
19 | },
20 | },
21 | }];
22 |
23 | export const information: types.information = {
24 | llmFileVersion: "1.0",
25 | env: ["OPENAI_API_KEY"],
26 | functions: true,
27 | functionsData: tools,
28 | multiModal: true,
29 | callbackSupport: true,
30 | streamingSupport: false,
31 | id: "gpt4o",
32 | name: "GPT-4 Omni",
33 | description: "OpenAI's most powerful model, with heightened multimodal capabilities!",
34 | highCostLLM: true,
35 | };
36 |
37 | async function doTools(
38 | res: types.Response,
39 | callback?:
40 | | ((information: types.callbackData, complete: boolean) => void)
41 | | null,
42 | requirements?: types.Requirements,
43 | ): Promise<types.Response> {
44 | if (res.choices[0].finish_reason !== "tool_calls") {
45 | throw "What The Shit?";
46 | }
47 |
48 | const toolCalls = res.choices[0].message.tool_calls!;
49 |
50 | // What if they happen to use it later?
51 | // deno-lint-ignore require-await
52 | const promises = toolCalls.map(async (tool) => {
53 | if (tool.function.name === "sanitycheck") {
54 | return {
55 | role: "tool",
56 | content: "true",
57 | tool_call_id: tool.id,
58 | };
59 | } else {
60 | return {
61 | role: "tool",
62 | content: "Unknown tool or not implemented",
63 | tool_call_id: tool.id,
64 |
65 | };
66 | }
67 | });
68 |
69 | // Use Promise.all to wait for all promises to resolve
70 | const results = await Promise.all(promises);
71 |
72 | results.forEach((result) => {
73 | res.messages.push(result);
74 | });
75 |
76 | const newres = await send(null, res.messages, callback, requirements);
77 |
78 | console.log(newres);
79 |
80 | return newres;
81 | }
82 |
83 | export async function send(
84 | prompt: string | null,
85 | messages: types.Message[],
86 | callback?:
87 | | ((information: types.callbackData, complete: boolean) => void)
88 | | null,
89 | requirements?: types.Requirements,
90 | ): Promise<types.Response> {
91 | if (!requirements?.env?.OPENAI_API_KEY) {
92 | throw new DOMException("env.OPENAI_API_KEY", "NotFoundError");
93 | }
94 |
95 | if (requirements.streaming) {
96 | throw new DOMException("streaming", "NotSupportedError");
97 | }
98 |
99 | if (messages.length === 0) {
100 | messages.push({
101 | role: "system",
102 | content: [{
103 | type: "text",
104 | text: "You are GPT-4 Omni, an LLM by OpenAI.",
105 | }],
106 | });
107 | }
108 |
109 | const prompt_data: types.ContentPart[] = [];
110 |
111 | if (prompt !== null) {
112 | prompt_data.push({
113 | type: "text",
114 | text: prompt,
115 | });
116 | }
117 |
118 | requirements.images?.forEach((image_url) => {
119 | prompt_data.push({
120 | type: "image_url",
121 | image_url: {
122 | url: image_url,
123 | },
124 | });
125 | });
126 |
127 | if (prompt_data.length !== 0) {
128 | messages.push({
129 | role: "user",
130 | content: prompt_data,
131 | });
132 |
133 |
134 | }
135 |
136 | // Make sure all existing images are valid (if any)
137 | // Things to check:
138 | // - Image still exists (discord's image storage is volatile)
139 | // - Image is png, jpg, gif or webp
140 |
141 | messages = await Promise.all(messages.map(async (message) => {
142 | message.content = message.content as types.ContentPart[];
143 |
144 | if (!Array.isArray(message.content)) {
145 | return message
146 | }
147 |
148 | if (message.content.length === 0) {
149 | return message
150 | }
151 |
152 | message.content = await Promise.all(message.content.map(async (part): Promise<types.ContentPart> => {
153 | if (part.type === "image_url") {
154 | const imageData = await fetch(part.image_url.url)
155 |
156 | if (imageData.status !== 200) {
157 | message.role === "system"
158 | return {
159 | type: "text",
160 | text: "There was an image here, but it no longer exists. Ask the user to resend it if you need it."
161 | }
162 | }
163 |
164 | if (!imageData.headers.get("content-type")?.startsWith("image/")) {
165 | message.role === "system"
166 | return {
167 | type: "text",
168 | text: "There was an image here, but it was not a valid image. Ask the user to resend it if you need it."
169 | }
170 | }
171 |
172 | if (
173 | !part.image_url.url.endsWith(".png") &&
174 | !part.image_url.url.endsWith(".jpg") &&
175 | !part.image_url.url.endsWith(".gif") &&
176 | !part.image_url.url.endsWith(".webp"))
177 | {
178 | message.role === "system"
179 | return {
180 | type: "text",
181 | text: `There was an image here, but it was not a valid image format! Ask the user to resend in either .png, .jpg, .gif or .webp format. The image format was ${imageData.headers.get("content-type")?.split("/")[1]}.`
182 | }
183 | }
184 |
185 | return part
186 | } else {
187 | return part
188 | }
189 | }))
190 |
191 | return message
192 | }))
193 |
194 | const res = await fetch("https://api.openai.com/v1/chat/completions", {
195 | method: "POST",
196 | headers: {
197 | "Content-Type": "application/json",
198 | Authorization: `Bearer ${requirements?.env.OPENAI_API_KEY}`,
199 | },
200 | body: JSON.stringify({
201 | model: "gpt-4o",
202 | messages: messages,
203 | tools,
204 | }),
205 | });
206 |
207 | let resp: types.Response = await res.json();
208 |
209 | if (resp.error) {
210 | throw new DOMException(resp.error.message, "ExecutionError");
211 | }
212 |
213 | messages.push(resp.choices[0].message);
214 |
215 | resp.messages = messages;
216 |
217 | if (resp.choices[0].finish_reason === "tool_calls") {
218 | if (callback) {
219 | callback({
220 | toolCalls: resp.choices[0].message.tool_calls,
221 | data: resp.choices[0].message.content,
222 | }, false);
223 | }
224 | resp = await doTools(resp, null, requirements);
225 | resp.choices[0].message.content = resp.choices[0].message.content as string;
226 | }
227 |
228 | if (callback) callback({ data: resp.choices[0].message.content }, true);
229 |
230 | return resp;
231 | }
232 |
--------------------------------------------------------------------------------
/bots/llama_3.ts:
--------------------------------------------------------------------------------
1 | import * as types from "../main.d.ts";
2 |
3 | const tools: types.Tool[] = [{
4 | type: "function",
5 | function: {
6 | name: "sanitycheck",
7 | description:
8 | "Returns true, as a sanity check to make sure function support is OK. If this fails, something's fucked.",
9 | parameters: {
10 | type: "object",
11 | properties: {
12 | useless: {
13 | type: "string",
14 | description:
15 | "You can put whatever here, it's not going to do anything.",
16 | },
17 | },
18 | required: ["useless"],
19 | },
20 | },
21 | }];
22 |
23 | export const information: types.information = {
24 | llmFileVersion: "1.0",
25 | env: ["GROQ_API_KEY"],
26 | functions: true,
27 | functionsData: tools,
28 | multiModal: false,
29 | callbackSupport: true,
30 | streamingSupport: false,
31 | id: "llama3-groq",
32 | name: "Llama 3 70b (Groq)",
33 | description: "Meta's new flagship model. Powered by Groq!",
34 | highCostLLM: false,
35 | };
36 |
37 | // const db = await Deno.openKv("./db.sqlite")
38 |
39 | async function doTools(
40 | res: types.Response,
41 | callback?:
42 | | ((information: types.callbackData, complete: boolean) => void)
43 | | null,
44 | requirements?: types.Requirements,
45 | ): Promise<types.Response> {
46 | if (res.choices[0].finish_reason !== "tool_calls") {
47 | throw "What The Shit?";
48 | }
49 |
50 | const toolCalls = res.choices[0].message.tool_calls!;
51 |
52 | // What if they happen to use it later?
53 | // deno-lint-ignore require-await
54 | const promises = toolCalls.map(async (tool) => {
55 | if (tool.function.name === "sanitycheck") {
56 | return {
57 | role: "tool",
58 | content: "true",
59 | tool_call_id: tool.id,
60 | };
61 | } else {
62 | return {
63 | role: "tool",
64 | content: "Unknown tool or not implemented",
65 | tool_call_id: tool.id,
66 | };
67 | }
68 | });
69 |
70 | // Use Promise.all to wait for all promises to resolve
71 | const results = await Promise.all(promises);
72 |
73 | results.forEach((result) => {
74 | res.messages.push(result);
75 | });
76 |
77 | const newres = await send(null, res.messages, callback, requirements);
78 |
79 | console.log(newres);
80 |
81 | return newres;
82 | }
83 |
84 | export async function send(
85 | prompt: string | null,
86 | messages: types.Message[],
87 | callback?:
88 | | ((information: types.callbackData, complete: boolean) => void)
89 | | null,
90 | requirements?: types.Requirements,
91 | ): Promise<types.Response> {
92 | if (!requirements?.env?.GROQ_API_KEY) {
93 | throw new DOMException("env.GROQ_API_KEY", "NotFoundError");
94 | }
95 |
96 | if (requirements.streaming) {
97 | throw new DOMException("streaming", "NotSupportedError");
98 | }
99 | // here we go
100 |
101 | if (messages.length === 0) {
102 | messages.push({
103 | role: "system",
104 | content:
105 | "You are Llama, an LLM by Meta. You are running through a Discord bot named LLM Bot, by Eris.",
106 | });
107 | }
108 |
109 | if (prompt) {
110 | messages.push({
111 | role: "user",
112 | content: prompt,
113 | });
114 | }
115 |
116 | const res = await fetch("https://api.groq.com/openai/v1/chat/completions", {
117 | method: "POST",
118 | headers: {
119 | "Content-Type": "application/json",
120 | Authorization: `Bearer ${requirements.env.GROQ_API_KEY}`,
121 | },
122 | body: JSON.stringify({
123 | model: "llama3-70b-8192",
124 | messages: messages,
125 | tools
126 | }),
127 | });
128 |
129 | let resp: types.Response = await res.json();
130 |
131 | if (resp.error) {
132 | // Fuck.
133 | throw resp.error.message; // well at least they know why the fuck it crashed??
134 | }
135 |
136 | messages.push(resp.choices[0].message);
137 |
138 | resp.messages = messages;
139 |
140 | if (resp.choices[0].finish_reason === "tool_calls") {
141 | if (callback) {
142 | callback({
143 | toolCalls: resp.choices[0].message.tool_calls,
144 | data: resp.choices[0].message.content,
145 | }, false);
146 | }
147 | resp = await doTools(resp, null, requirements);
148 | resp.choices[0].message.content = resp.choices[0].message.content as string;
149 | }
150 |
151 | if (callback) callback({ data: resp.choices[0].message.content }, true);
152 |
153 | return resp;
154 | }
155 |
--------------------------------------------------------------------------------
/bots/mixtral.ts:
--------------------------------------------------------------------------------
1 | import * as types from "../main.d.ts";
2 |
3 | const tools: types.Tool[] = [{
4 | type: "function",
5 | function: {
6 | name: "sanitycheck",
7 | description:
8 | "Returns true, as a sanity check to make sure function support is OK. If this fails, something's fucked.",
9 | parameters: {
10 | type: "object",
11 | properties: {
12 | useless: {
13 | type: "string",
14 | description:
15 | "You can put whatever here, it's not going to do anything.",
16 | },
17 | },
18 | required: ["useless"],
19 | },
20 | },
21 | }];
22 |
23 | export const information: types.information = {
24 | llmFileVersion: "1.0",
25 | env: ["GROQ_API_KEY"],
26 | functions: true,
27 | functionsData: tools,
28 | multiModal: false,
29 | callbackSupport: true,
30 | streamingSupport: false,
31 | id: "mixtral-groq",
32 | name: "Mixtral (Groq)",
33 | description: "Mistral's MOE model. Powered by Groq!",
34 | highCostLLM: false,
35 | };
36 |
37 | // const db = await Deno.openKv("./db.sqlite")
38 |
39 | async function doTools(
40 | res: types.Response,
41 | callback?:
42 | | ((information: types.callbackData, complete: boolean) => void)
43 | | null,
44 | requirements?: types.Requirements,
45 | ): Promise<types.Response> {
46 | if (res.choices[0].finish_reason !== "tool_calls") {
47 | throw "What The Shit?";
48 | }
49 |
50 | const toolCalls = res.choices[0].message.tool_calls!;
51 |
52 | // What if they happen to use it later?
53 | // deno-lint-ignore require-await
54 | const promises = toolCalls.map(async (tool) => {
55 | if (tool.function.name === "sanitycheck") {
56 | return {
57 | role: "tool",
58 | content: "true",
59 | tool_call_id: tool.id,
60 | };
61 | } else {
62 | return {
63 | role: "tool",
64 | content: "Unknown tool or not implemented",
65 | tool_call_id: tool.id,
66 |
67 | };
68 | }
69 | });
70 |
71 | // Use Promise.all to wait for all promises to resolve
72 | const results = await Promise.all(promises);
73 |
74 | results.forEach((result) => {
75 | res.messages.push(result);
76 | });
77 |
78 | const newres = await send(null, res.messages, callback, requirements);
79 |
80 | console.log(newres);
81 |
82 | return newres;
83 | }
84 |
85 | export async function send(
86 | prompt: string | null,
87 | messages: types.Message[],
88 | callback?:
89 | | ((information: types.callbackData, complete: boolean) => void)
90 | | null,
91 | requirements?: types.Requirements,
92 | ): Promise<types.Response> {
93 | if (!requirements?.env?.GROQ_API_KEY) {
94 | throw new DOMException("env.GROQ_API_KEY", "NotFoundError");
95 | }
96 |
97 | if (requirements.streaming) {
98 | throw new DOMException("streaming", "NotSupportedError");
99 | }
100 | // here we go
101 |
102 | if (messages.length === 0) {
103 | messages.push({
104 | role: "system",
105 | content:
106 | "You are Mixtral, an LLM by Mistral AI. You are running through a Discord bot named LLM Bot, by Eris.",
107 | });
108 | }
109 |
110 | if (prompt) {
111 | messages.push({
112 | role: "user",
113 | content: prompt,
114 | });
115 | }
116 |
117 | const res = await fetch("https://api.groq.com/openai/v1/chat/completions", {
118 | method: "POST",
119 | headers: {
120 | "Content-Type": "application/json",
121 | Authorization: `Bearer ${requirements.env.GROQ_API_KEY}`,
122 | },
123 | body: JSON.stringify({
124 | model: "mixtral-8x7b-32768",
125 | messages: messages,
126 | tools
127 | }),
128 | });
129 |
130 | let resp: types.Response = await res.json();
131 |
132 | if (resp.error) {
133 | // Fuck.
134 | throw resp.error.message; // well at least they know why the fuck it crashed??
135 | }
136 |
137 | messages.push(resp.choices[0].message);
138 |
139 | resp.messages = messages;
140 |
141 | if (resp.choices[0].finish_reason === "tool_calls") {
142 | if (callback) {
143 | callback({
144 | toolCalls: resp.choices[0].message.tool_calls,
145 | data: resp.choices[0].message.content,
146 | }, false);
147 | }
148 | resp = await doTools(resp, null, requirements);
149 | resp.choices[0].message.content = resp.choices[0].message.content as string;
150 | }
151 |
152 | if (callback) callback({ data: resp.choices[0].message.content }, true);
153 |
154 | return resp;
155 | }
156 |
--------------------------------------------------------------------------------
/changelog.md:
--------------------------------------------------------------------------------
1 | # 11/2/2023
2 |
3 | - Scrapped the llama2.ts file in favor of openrouter
4 | - Added openrouter support
5 | - Worked on the basis for model swapping
6 | - Rethought a bit of code I had in place
7 | - Switched GPT4 back to turbo so I can use functions
8 | - Began integration of VDB (GPT4 only for now while I get my shit together)
9 |
10 | # 1/20/2024
11 |
12 | - Added GPT-4 Vision for those who like it
13 | - Did formatting
14 | - Fixed VDB
15 | - Did some magicks to the rest of the files, idk man I forgor
16 |
17 | # 3/31/2024: April Fools, Idiot
18 |
19 | Reworked how LLMs are loaded; the bot is less of a full solution and more of a
20 | framework now.
--------------------------------------------------------------------------------
/client.ts:
--------------------------------------------------------------------------------
1 | // Here we go again.
2 | import {
3 | ActivityType,
4 | Client,
5 | GatewayIntentBits,
6 | Partials,
7 | } from "npm:discord.js";
8 |
9 | import { config } from "npm:dotenv";
10 |
11 | config();
12 |
13 | const token = Deno.env.get("DISCORD_TOKEN");
14 | const id = Deno.env.get("APP_ID");
15 |
16 | if (typeof token !== "string") {
17 | throw "No Discord bot token set! Set DISCORD_TOKEN in .env to your bot's token!";
18 | } else if (typeof id !== "string") {
19 | throw `No Discord app ID set! Set APP_ID in .env to your bot's client ID!`;
20 | }
21 |
22 | const client: Client = new Client({
23 | intents: [
24 | GatewayIntentBits.Guilds,
25 | GatewayIntentBits.GuildMessages,
26 | GatewayIntentBits.MessageContent,
27 | GatewayIntentBits.GuildMembers,
28 | GatewayIntentBits.DirectMessages,
29 | ],
30 | partials: [
31 | // Partials.User,
32 | Partials.Channel,
33 | // Partials.GuildMember,
34 | // Partials.Message,
35 | // Partials.Reaction,
36 | // Partials.GuildScheduledEvent,
37 | // Partials.ThreadMember,
38 | ],
39 | });
40 |
41 | client.on("ready", (c) => {
42 | console.log(`Logged in as ${c.user.tag}!`);
43 |
44 | c.user.setActivity("people use ChatGPT differently", {
45 | type: ActivityType.Watching,
46 | });
47 | });
48 |
49 | await client.login(token);
50 |
51 | export default client;
52 |
--------------------------------------------------------------------------------
/lib/eval.ts:
--------------------------------------------------------------------------------
1 | export function safeEval(code: string): Promise<string> {
2 | return new Promise((resolve, reject) => {
3 | const worker = new Worker(
4 | import.meta.resolve("./eval_worker.js"),
5 | {
6 | type: "module",
7 | // @ts-ignore
8 | name,
9 | deno: {
10 | //@ts-ignore ignore the namespace annotation. Deno < 1.22 required this
11 | namespace: false,
12 | permissions: {
13 | env: false,
14 | hrtime: false,
15 | net: true,
16 | ffi: false,
17 | read: false,
18 | run: false,
19 | write: false,
20 | },
21 | },
22 | },
23 | );
24 |
25 | let timeoutId: number;
26 |
27 | worker.onmessage = (msg) => {
28 | console.log(msg.data);
29 | clearTimeout(timeoutId);
30 | if (typeof msg.data !== "string") {
31 | worker.terminate();
32 | reject("Worker returned a corrupt message!");
33 | } else {
34 | worker.terminate();
35 | resolve(msg.data);
36 | }
37 | };
38 |
39 | worker.postMessage(code);
40 |
41 | timeoutId = setTimeout(() => {
42 | console.log("early termination");
43 | worker.terminate(); // What's taking YOU so long, hmm?
44 | reject("Worker did not respond in time!");
45 | }, 10000);
46 | });
47 | }
48 |
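49 | // Usage sketch: resolves with the worker's stringified eval result, and rejects
50 | // after the 10s timeout or on a non-string response.
51 | //   const out = await safeEval("6 * 7"); // "42"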
--------------------------------------------------------------------------------
/lib/eval_worker.js:
--------------------------------------------------------------------------------
1 | // deno-lint-ignore no-global-assign
2 | console = null;
3 |
4 | self.onmessage = async (e) => {
5 | try {
6 | const response = `${eval(e.data)}`;
7 |
8 | postMessage(response);
9 | } catch (err) {
10 | postMessage(`Error occurred during code processing: ${err}`);
11 | }
12 | };
13 |
--------------------------------------------------------------------------------
/lib/importLLMFile.ts:
--------------------------------------------------------------------------------
1 | // Automatically imports an LLM File to the global object (availableLLMs) and gives you information about it so you don't have to waste time writing an implementation.
2 |
3 | import * as types from "../main.d.ts";
4 |
5 | import { transform } from "https://deno.land/x/esbuild@v0.20.2/mod.js";
6 |
7 | export default async function importLLMFile(modulePath: string) {
8 | try {
9 | if (!globalThis.availableLLMs) {
10 | globalThis.availableLLMs = {};
11 | }
12 |
13 | const tsCode = await Deno.readTextFile(Deno.cwd() + `/${modulePath}`);
14 |
15 | const { code } = await transform(tsCode, {
16 | loader: "ts",
17 | });
18 |
19 | const base64Data = btoa(code);
20 |
21 | // Create the Data URL
22 | const dataURL = `data:text/plain;base64,${base64Data}`;
23 |
24 | const module: types.llmFile = await import(dataURL);
25 |
26 | if (module && module.information && typeof module.send === "function") {
27 | globalThis.availableLLMs[module.information.id] = {
28 | information: module.information,
29 | send: module.send,
30 | };
31 |
32 | return module.information; // Return the information object
33 | } else {
34 | console.error(
35 | `LLMFile '${modulePath}' doesn't have the right exports!`,
36 | );
37 | return null; // Return null if the module doesn't have the required exports
38 | }
39 | } catch (error) {
40 | if (Deno.env.get("debug") === "true") {
41 | console.error(`Error importing module '${modulePath}':`, error);
42 | }
43 | return null; // Return null if there's an error importing the module
44 | }
45 | }
46 |
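47 | // Usage sketch (path is relative to the working directory, as in main.ts):
48 | //   const info = await importLLMFile("bots/chatgpt.ts");
49 | //   if (info) console.log(`Registered ${info.name} as availableLLMs["${info.id}"]`);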
--------------------------------------------------------------------------------
/lib/sanity_check.ts:
--------------------------------------------------------------------------------
1 | // This is only to be used if you feel like something got fucked up while coding the bot.
2 |
3 | // This takes 2 arguments: the bot file to test (--filename) and what test you want to do: "just_work" or "is_functional" (--type)
4 |
5 | // "just_work", as the name suggests, just makes sure the LLM file returns a valid output. If it fails, it'll throw an error.
6 |
7 | // "is_functional" asks the bot to say nothing but true and checks if it matches. If it doesn't, error will be thrown.
8 |
9 | // If both sanity checks pass, it's probably safe to use that code.
10 |
11 | let llmfile_path = "../bots/REPLACEME.ts";
12 | let type = "";
13 |
14 | const filenameIndex = Deno.args.indexOf("--filename");
15 | const typeIndex = Deno.args.indexOf("--type");
16 |
17 | if (filenameIndex !== -1 && filenameIndex + 1 < Deno.args.length) {
18 | const filename = Deno.args[filenameIndex + 1];
19 |
20 | llmfile_path = llmfile_path.replace("REPLACEME", filename);
21 | } else {
22 | console.error(
23 | 'No filename argument provided or value is missing (--filename "chatgpt")',
24 | );
25 | Deno.exit(1);
26 | }
27 |
28 | if (typeIndex !== -1 && typeIndex + 1 < Deno.args.length) {
29 | // Get the value of the type argument
30 | type = Deno.args[typeIndex + 1];
31 |
32 | // Check if the type is one of the specified values
33 | if (type !== "is_functional" && type !== "just_work") {
34 | console.error(
35 | "Invalid type argument. It should be either 'is_functional' or 'just_work'",
36 | );
37 | Deno.exit(1); // Exit with error code 1
38 | }
39 | } else {
40 | console.log(
41 | 'No type argument provided or value is missing (--type "is_functional")',
42 | );
43 | Deno.exit(1); // Exit with error code 1
44 | }
45 |
46 | import(llmfile_path).then(async (module) => {
47 | // Module is imported successfully
48 | console.log("Module imported:", module);
49 |
50 | if (module.send) {
51 | if (type === "just_work") {
52 | console.log("Checking if the LLM file is functional...");
53 |
54 | try {
55 | const res = await module.send(null, [{
56 |   role: "system",
57 |   content: "You are running in a sanity check mode. Output anything, but making it very short is advisable.",
58 | }]);
59 | console.log("BotWorks. Response:", res.choices[0].message.content);
60 | } catch (err) {
61 | throw `BotFailed: Error was thrown while sending message (${err})`;
62 | }
63 | }
64 | } else {
65 | console.error("This isn't an LLM file (no send function on module)! >:/");
66 | }
67 | }).catch((error) => {
68 | // Error occurred while importing module
69 | console.error("Error importing module:", error);
70 | });
71 |
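72 | // Example invocation (a sketch; grant whatever permissions the tested bot needs):
73 | //   deno run --allow-read --allow-env --allow-net lib/sanity_check.ts --filename "chatgpt" --type "just_work"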
--------------------------------------------------------------------------------
/main.d.ts:
--------------------------------------------------------------------------------
1 | // These are ALL the types LLM Files can use.
2 | // Also embeds some types for LLM Bot, if it uses any.
3 |
4 | export type TextContent = {
5 | type: "text";
6 | text: string;
7 | };
8 |
9 | export type ImageContentPart = {
10 | type: "image_url";
11 | image_url: {
12 | url: string;
13 | detail?: string; // Optional, defaults to 'auto'
14 | };
15 | };
16 |
17 | export type ContentPart = TextContent | ImageContentPart;
18 |
19 | export type Message<T extends boolean = true> = {
20 | role: string;
21 | content: T extends true ? (string | ContentPart[] | null) : (string | null);
22 | name?: string;
23 | tool_calls?: ToolCall[];
24 | tool_call_id?: string;
25 | };
26 |
27 | export type ToolCall = {
28 | id: string;
29 | type: "function";
30 | function: {
31 | name: string;
32 | arguments: string; // JSON format arguments
33 | };
34 | };
35 |
36 | export type Tool = {
37 | type: "function";
38 | function: {
39 | description?: string;
40 | name: string;
41 | parameters: object; // JSON Schema object
42 | };
43 | };
44 |
45 | export type Response = {
46 | id: string;
47 | choices: Choice[];
48 | messages: Message[];
49 | created: number; // Unix timestamp
50 | model: string;
51 | object: "chat.completion";
52 | error?: {
53 | code: number;
54 | message: string;
55 | }; // If this is here, shit
56 | };
57 |
58 | export type Choice = {
59 | finish_reason: string | null; // Depends on the model. Ex: 'stop' | 'length' | 'content_filter' | 'tool_calls' | 'function_call'
60 | message: Message;
61 | };
62 |
63 | export type information = {
64 | llmFileVersion: string;
65 | env?: string[];
66 | functions: boolean;
67 | functionsData?: Tool[];
68 | multiModal: boolean;
69 | callbackSupport: boolean;
70 | streamingSupport?: boolean;
71 | id: string;
72 | name: string;
73 | description: string;
74 | highCostLLM: boolean;
75 | };
76 |
77 | export type Requirements = {
78 | env?: {
79 | [envName: string]: string;
80 | };
81 | images?: string[];
82 | streaming?: boolean;
83 | };
84 |
85 | export type callbackData = {
86 | toolCalls?: ToolCall[]; // What toolcalls did the LLM do if it called tools?
87 | data: string | null; // Whatever the LLM said (or a chunk if streaming is on)
88 | };
89 |
90 | export type llmFile = {
91 | information: information;
92 | send: (
93 | prompt: string | null,
94 | messages: Message[],
95 | callback?: ((information: callbackData, complete: boolean) => void) | null,
96 | requirements?: Requirements,
97 | ) => Promise;
98 | };
99 |
100 | declare global {
101 | // deno-lint-ignore no-var
102 | var availableLLMs: {
103 | [id: string]: llmFile;
104 | };
105 | }
106 |
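107 | // Minimal llmFile sketch (illustrative only; see bots/*.ts for real examples):
108 | //   export const information: information = {
109 | //     llmFileVersion: "1.0", functions: false, multiModal: false,
110 | //     callbackSupport: false, id: "echo", name: "Echo",
111 | //     description: "Echoes the prompt back.", highCostLLM: false,
112 | //   };
113 | //   export async function send(prompt, messages) { /* ...return a Response */ }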
--------------------------------------------------------------------------------
/main.ts:
--------------------------------------------------------------------------------
1 | /// <reference path="./main.d.ts" />
2 |
3 | import { existsSync, walk } from "https://deno.land/std@0.221.0/fs/mod.ts";
4 |
5 | import importLLMFile from "./lib/importLLMFile.ts";
6 |
7 | if (!existsSync("./bots")) {
8 | throw new DOMException(
9 | "Add the /bots directory and populate it with LLMFiles to use the bot! As an example, copy the directory from the Github.",
10 | "NoLLMsAddedError",
11 | );
12 | }
13 |
14 | for await (const entry of walk("./bots")) {
15 | if (entry.isFile && entry.name.endsWith(".ts")) {
16 | await importLLMFile(
17 | entry.path,
18 | );
19 | }
20 | }
21 |
22 | console.log("LLMs initialized.")
23 |
24 | import * as types from "./main.d.ts";
25 |
26 | type messageData = {
27 | id: string;
28 | messages: types.Message[];
29 | };
30 |
31 | import client from "./client.ts";
32 |
33 | console.log(
34 | "Everything looks good!",
35 | Object.keys(availableLLMs).length,
36 | "LLMs were imported.",
37 | );
38 |
39 | await import("./slashcode.ts");
40 |
41 | import { ChannelType, Message } from "npm:discord.js";
42 |
43 | const db = await Deno.openKv("./db.sqlite");
44 |
45 | function splitStringIntoChunks(
46 | inputString: string,
47 | chunkSize: number = 1999,
48 | ): string[] {
49 | const lines: string[] = inputString.split("\n");
50 | const chunks: string[] = [];
51 | let currentChunk: string = "";
52 |
53 | for (const line of lines) {
54 | if (currentChunk.length + line.length + 1 > chunkSize) {
55 | chunks.push(currentChunk.trim());
56 | currentChunk = line;
57 | } else {
58 | if (currentChunk) {
59 | currentChunk += "\n";
60 | }
61 | currentChunk += line;
62 | }
63 | }
64 |
65 | if (currentChunk) {
66 | chunks.push(currentChunk.trim());
67 | }
68 |
69 | return chunks;
70 | }
71 |
72 | /*
73 | New database example:
74 |
75 | {
76 | channels: [1],
77 | users: {
78 | 0: {
79 | current_bot: "chatgpt",
80 | current_conversation: 0,
81 | conversations: {
82 | chatgpt: [
83 | {
84 | id: "completion-37",
85 | messages: [{}] // Alan, insert message object every time you finish here. Wait, Alan, are you still on the team?
86 | }
87 | ]
88 | }
89 | }
90 | },
91 | }
92 | */
93 |
94 | const getImagesFromMessage = async (message: Message) => {
95 | const images: string[] = [];
96 |
97 | // Process attachments
98 | message.attachments.forEach((image) => {
99 | images.push(image.url);
100 | });
101 |
102 | // Process URLs in message content
103 | const regx = message.content.match(
104 | /^(?:(?:(?:https?|ftp):)?\/\/)(?:\S+(?::\S*)?@)?(?:(?!(?:10|127)(?:\.\d{1,3}){3})(?!(?:169\.254|192\.168)(?:\.\d{1,3}){2})(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))|(?:(?:[a-z0-9\u00a1-\uffff][a-z0-9\u00a1-\uffff_-]{0,62})?[a-z0-9\u00a1-\uffff]\.)+(?:[a-z\u00a1-\uffff]{2,}\.?))(?::\d{2,5})?(?:[/?#]\S*)?$/i,
105 | );
106 |
107 | if (regx) {
108 | // Use Promise.all to wait for all asynchronous operations to complete
109 | const resultArray = await Promise.all(regx.map(async (link) => {
110 | const isImage = (await fetch(link)).headers.get("Content-Type")
111 | ?.startsWith("image/");
112 | if (isImage) {
113 | console.log(link);
114 | message.content.replace(link, "");
115 | return link;
116 | }
117 | return null;
118 | }));
119 |
120 | const filteredImages: string[] = [];
121 |
122 | resultArray.forEach((link) => {
123 | if (link !== null) filteredImages.push(link);
124 | });
125 |
126 | images.push(...filteredImages);
127 | }
128 |
129 | // Process stickers
130 | message.stickers.forEach((sticker) => {
131 | images.push(sticker.url);
132 | });
133 |
134 | return images;
135 | };
136 |
137 | client.on("messageCreate", async (message) => {
138 | let isBotChannel = (await db.get([
139 | "channels",
140 | message.channel.id,
141 | ])).value;
142 |
143 | if (isBotChannel === null) {
144 | await db.set(
145 | ["channels", message.channel.id],
146 | false,
147 | );
148 |
149 | isBotChannel = false;
150 | }
151 |
152 | if (message.author.bot || JSON.stringify(message.flags) === "4096") return; // The "4096" flag is the @silent flag on discord.
153 | if (
154 | message.channel.type === ChannelType.DM ||
155 | isBotChannel
156 | ) {
157 | let error = false; // Tracks if we've already pestered the user with an error / message :\
158 |
159 | const llm =
160 | (await db.get(["users", message.author.id, "current_bot"])).value; // After reading the typedocs I realized this is the cleaner way to do this
161 |
162 | if (llm === null) {
163 | await message.reply(
164 | "Looks like this is your first time using this bot! Run /info to learn how to use the full potential of this bot, and set your desired LLM using /set-ai!",
165 | );
166 | error = true;
167 | return;
168 | } else if (
169 | !Object.prototype.hasOwnProperty.call(availableLLMs, llm)
170 | ) {
171 | // current LLM is removed/corrupted
172 | await message.reply(
173 | "Your current LLM is corrupted or removed! Set a new LLM at /set-ai!",
174 | );
175 | return;
176 | }
177 |
178 | if (
179 | availableLLMs[llm].information.highCostLLM &&
180 | Deno.env.get("PREMIUM_ENFORCEMENT") === "true"
181 | ) {
182 | const guild = client.guilds.resolve(Deno.env.get("PRIMARY_GUILD") || "0");
183 | if (guild) {
184 | const member = await guild?.members.fetch(message.author.id);
185 | if (
186 | !member.premiumSince &&
187 | Deno.env.get("PRIMARY_GUILD") !== message.guild?.id
188 | ) {
189 | message.reply(
190 | "This LLM is for premium users only! Boost the server to gain access to this LLM, or join the bot host's primary server!",
191 | );
192 | return;
193 | }
194 | } else {
195 | message.reply(
196 | "your developer is terrible at his job (Premium lock config not set properly! This LLM is marked as high-cost, have the owner of the bot finish setup.)",
197 | );
198 | return;
199 | }
200 | }
201 |
202 | let isMessageProcessing = (await db.get([
203 | "users",
204 | message.author.id,
205 | "messageWaiting",
206 | ])).value;
207 |
208 | if (isMessageProcessing) {
209 | try {
210 | await message.delete();
211 | return;
212 | } catch (_err) {
213 | await message.reply("A message is already being processed!");
214 | return;
215 | }
216 | } else {
217 | isMessageProcessing = true;
218 |
219 | await db.set(
220 | ["users", message.author.id, "messageWaiting"],
221 | isMessageProcessing,
222 | );
223 | }
224 |
225 | let curconv = (await db.get([
226 | "users",
227 | message.author.id,
228 | "current_conversation",
229 | ])).value;
230 |
231 | if (curconv === null) {
232 | // They haven't used this LLM before
233 | curconv = 0;
234 | await db.set(
235 | ["users", message.author.id, "current_conversation"],
236 | curconv,
237 | );
238 |
239 | if (error === false) {
240 | await message.reply(
241 | "Started conversation! Use /wipe to reset this conversation.",
242 | );
243 | error = true;
244 | }
245 | }
246 |
247 | let messages = (await db.get<messageData[]>([
248 | "users",
249 | message.author.id,
250 | "conversations",
251 | llm,
252 | ])).value!;
253 |
254 | if (messages === null) {
255 | // No conversations for this LLM.
256 | messages = [{
257 | id: "New Conversation",
258 | messages: [],
259 | }];
260 |
261 | if (error === false) {
262 | await message.reply(
263 | "Started conversation! Use /wipe to reset this conversation.",
264 | );
265 | }
266 | }
267 |
268 | const curmsgs = messages[curconv].messages;
269 |
270 | const msg = await message.reply("Sending message...");
271 |
272 | const requirements = availableLLMs[llm].information;
273 |
274 | const reqobject: types.Requirements = {};
275 |
276 | if (requirements.multiModal) {
277 | const images: string[] = await getImagesFromMessage(message);
278 |
279 | reqobject.images = images;
280 | }
281 |
282 | if (requirements.env) {
283 | reqobject.env = {};
284 |
285 | requirements.env.forEach((envValue) => {
286 | if (!Deno.env.get(envValue)) {
287 |           throw new Error(`Required env value "${envValue}" not found, add it to .env!`);
288 | }
289 |
290 | reqobject.env![envValue] = Deno.env.get(envValue)!;
291 | });
292 | }
293 |
294 | reqobject.streaming = false; // No.
295 |
296 | try {
297 | const resp = await availableLLMs[llm].send(
298 | message.content,
299 | curmsgs,
300 | null,
301 | reqobject,
302 | );
303 |
304 | messages[curconv].messages = resp.messages;
305 |
306 | await db.set(
307 | ["users", message.author.id, "conversations", llm],
308 | messages,
309 | );
310 |
311 | const messagechunks = splitStringIntoChunks(
312 | resp.choices[0].message.content!,
313 | 2000,
314 | );
315 |
316 | let cvalue = 0;
317 |
318 | messagechunks.forEach((chunk) => {
319 | if (cvalue === 0) {
320 | cvalue = 1;
321 | isMessageProcessing = false;
322 |
323 | db.set(
324 | ["users", message.author.id, "messageWaiting"],
325 | isMessageProcessing,
326 | );
327 | msg.edit(chunk);
328 | } else {
329 | message.reply(chunk);
330 | }
331 | });
332 | } catch (err) {
333 | isMessageProcessing = false;
334 |
335 | db.set(
336 | ["users", message.author.id, "messageWaiting"],
337 | isMessageProcessing,
338 | );
339 | msg.edit(
340 | "LLM threw an error in processing! Contact the admins to have them handle and report the bug.",
341 | );
342 | console.error(
343 | `Model ${llm} threw an error! If you can't understand the error, contact the developer (Eris) for help. Error:`,
344 | err,
345 | );
346 | return;
347 | }
348 | }
349 | });
350 |
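351 | /*
352 |  * A sketch of the Deno KV layout this handler reads and writes, reconstructed
353 |  * from the db.get/db.set calls above (descriptive notes, not a schema the code enforces):
354 |  *
355 |  *   ["channels", <channelId>]                   -> boolean  (should the bot respond here?)
356 |  *   ["users", <userId>, "current_bot"]          -> string   (LLM id picked via /set-ai)
357 |  *   ["users", <userId>, "current_conversation"] -> number   (index into the conversations array)
358 |  *   ["users", <userId>, "conversations", <llm>] -> { id: string; messages: types.Message[] }[]
359 |  *   ["users", <userId>, "messageWaiting"]       -> boolean  (per-user lock while an LLM call is in flight)
360 |  */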
--------------------------------------------------------------------------------
/prototype/README.md:
--------------------------------------------------------------------------------
1 | This folder contains code that is being prototyped for use in LLM Bot.
2 |
3 | Expect:
4 |
5 | - Security vulns
6 | - General shitty code
7 | - Horribly horrendous usage of the database
8 |
9 | # RUNNING ANYTHING IN HERE IS AT. YOUR. OWN. RISK!!!
10 |
--------------------------------------------------------------------------------
/prototype/fixGPTCode.ts:
--------------------------------------------------------------------------------
1 | import * as types from "../main.d.ts";
2 |
3 | type ClaudeMessage = {
4 | role: string;
5 | content: {
6 | type: string;
7 | source?: {
8 | type: string;
9 | media_type: string;
10 | data: string;
11 | };
12 | text?: string;
13 | }[];
14 | };
15 |
16 | function convertMessages(
17 | messages: (ClaudeMessage | types.Message)[],
18 | ): (ClaudeMessage | types.Message)[] {
19 | return messages.map((message) => {
20 | if ("content" in message && Array.isArray(message.content)) {
21 | const content: types.ContentPart[] = [];
22 | for (const part of message.content) {
23 | if (part.type === "text") {
24 | content.push({ type: "text", text: part.text || "" });
25 | } else if (part.type === "image_url") {
26 | content.push({
27 | type: "image_url",
28 | image_url: {
29 | url: part.source?.data || "",
30 | detail: part.source?.media_type || "auto",
31 | },
32 | });
33 | }
34 | }
35 |
36 | return {
37 | role: message.role,
38 | content: content.length > 0 ? content : null,
39 | };
40 | } else {
41 | const content: {
42 | type: string;
43 | text?: string;
44 | source?: { type: string; media_type: string; data: string };
45 | }[] = [];
46 | if (Array.isArray(message.content)) {
47 | for (const part of message.content) {
48 | if (typeof part === "string") {
49 | content.push({ type: "text", text: part });
50 | } else if (part.type === "text") {
51 | content.push({ type: "text", text: part.text });
52 | } else if (part.type === "image_url") {
53 | content.push({
54 | type: "image",
55 | source: {
56 | type: "image_url",
57 | media_type: "image",
58 | data: part.image_url.url,
59 | },
60 | });
61 | }
62 | }
63 | }
64 |
65 | return {
66 | role: message.role,
67 | content,
68 | };
69 | }
70 | });
71 | }
72 |
73 | console.log(
74 | convertMessages([{
75 | "role": "user",
76 | "content": [{ "type": "text", "text": "Hello, Claude" }],
77 | }]),
78 | );
79 |
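80 | // Expected output of the console.log above, traced by hand (the input takes the
81 | // first branch, so the text part passes through unchanged):
82 | //
83 | //   [ { role: "user", content: [ { type: "text", text: "Hello, Claude" } ] } ]
84 | //
85 | // Caveat: the else branch re-checks Array.isArray(message.content) after the outer
86 | // condition already ruled it out, so plain-string content currently maps to an empty array.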
--------------------------------------------------------------------------------
/slashcode.ts:
--------------------------------------------------------------------------------
1 | import client from "./client.ts";
2 |
3 | import * as types from "./main.d.ts";
4 |
5 | import { BingImageCreator } from "https://esm.sh/@timefox/bic-sydney@1.1.4";
6 | import crypto from "node:crypto";
7 |
8 | console.log("Loading slash commands...");
9 |
10 | type messageData = {
11 | id: string;
12 | messages: types.Message[];
13 | };
14 |
15 | // import { addDocument } from "./vdb.ts";
16 |
17 | import { config } from "npm:dotenv";
18 | config();
19 |
20 | import { REST } from "npm:@discordjs/rest";
21 |
22 | import { Routes } from "npm:discord-api-types/v10";
23 |
24 | import {
25 | ActionRowBuilder,
26 | ColorResolvable,
27 | EmbedBuilder,
28 | PermissionFlagsBits,
29 | RESTPostAPIChatInputApplicationCommandsJSONBody,
30 | SlashCommandBuilder,
31 | StringSelectMenuBuilder,
32 | } from "npm:discord.js";
33 |
34 | function splitStringIntoChunks(
35 | inputString: string,
36 | chunkSize: number = 1999,
37 | ): string[] {
38 | const lines: string[] = inputString.split("\n");
39 | const chunks: string[] = [];
40 | let currentChunk: string = "";
41 |
42 | for (const line of lines) {
43 | if (currentChunk.length + line.length + 1 > chunkSize) {
44 | chunks.push(currentChunk.trim());
45 | currentChunk = line;
46 | } else {
47 | if (currentChunk) {
48 | currentChunk += "\n";
49 | }
50 | currentChunk += line;
51 | }
52 | }
53 |
54 | if (currentChunk) {
55 | chunks.push(currentChunk.trim());
56 | }
57 |
58 | return chunks;
59 | }
60 |
61 | const commands: RESTPostAPIChatInputApplicationCommandsJSONBody[] = [];
62 |
63 | const db = await Deno.openKv("./db.sqlite");
64 |
65 | const command1 = new SlashCommandBuilder();
66 | command1.setName("info");
67 | command1.setDescription("Gives some information about this bot! :)");
68 | commands.push(command1.toJSON());
69 |
70 | const command2 = new SlashCommandBuilder();
71 | command2.setName("wipe");
72 | command2.setDescription(
73 | "Resets your conversation with your current AI, letting you start over.",
74 | );
75 | commands.push(command2.toJSON());
76 |
77 | const command3 = new SlashCommandBuilder();
78 | command3.setName("ban");
79 | command3.setDescription(
80 | "Bans a user from being able to use the bot! THIS IS FOR THE BOT DEVELOPER!!!",
81 | );
82 | command3.addStringOption((option) =>
83 | option.setName("id").setDescription("UserID to send to brazil").setRequired(
84 | true,
85 | )
86 | );
87 | commands.push(command3.toJSON());
88 |
89 | const command4 = new SlashCommandBuilder();
90 | command4.setName("unban");
91 | command4.setDescription(
92 | "Unbans a user from the bot! THIS IS FOR THE BOT DEVELOPER!!!",
93 | );
94 | command4.addStringOption((option) =>
95 | option.setName("id").setDescription("UserID to remove from brazil")
96 | .setRequired(true)
97 | );
98 | commands.push(command4.toJSON());
99 |
100 | const command5 = new SlashCommandBuilder();
101 | command5.setName("remove-conversation");
102 | command5.setDescription("Removes a conversation from your list.");
103 | commands.push(command5.toJSON());
104 |
105 | const command6 = new SlashCommandBuilder();
106 | command6.setName("set-conversation");
107 | command6.setDescription("Choose which conversation you are using.");
108 | commands.push(command6.toJSON());
109 |
110 | const command7 = new SlashCommandBuilder();
111 | command7.setName("view-conversation");
112 | command7.setDescription("See what conversations you have.");
113 | commands.push(command7.toJSON());
114 |
115 | const command8 = new SlashCommandBuilder();
116 | command8.setName("new-conversation");
117 | command8.setDescription("Start a new conversation with your chosen AI.");
118 | commands.push(command8.toJSON());
119 |
120 | const command9 = new SlashCommandBuilder();
121 | command9.setName("set-ai");
122 | command9.setDescription("Switch between the options for using this bot.");
123 | commands.push(command9.toJSON());
124 |
125 | const command10 = new SlashCommandBuilder();
126 | command10.setName("create-image");
127 | command10.setDescription(
128 | "Create a 1024x1024 image using Stable Diffusion! (Powered by Salad.com)",
129 | );
130 | command10.addStringOption((option) =>
131 | option.setName("prompt").setDescription(
132 | "Prompt to be sent to Stable Diffusion",
133 | ).setRequired(true)
134 | );
135 | commands.push(command10.toJSON());
136 |
137 | const command11 = new SlashCommandBuilder();
138 | command11.setName("get-image");
139 | command11.setDescription("Get an image that was made by Stable Diffusion.");
140 | commands.push(command11.toJSON());
141 |
142 | const command12 = new SlashCommandBuilder();
143 | command12.setName("add-document");
144 | command12.setDescription(
145 | "Add a document to this bot's information database (Powered by Supabase).",
146 | );
147 | command12.setDefaultMemberPermissions(PermissionFlagsBits.ManageChannels);
148 | command12.addStringOption((option) =>
149 | option.setName("file-name").setDescription(
150 | "The name of the file for the vector database.",
151 | ).setRequired(true)
152 | );
153 | command12.addAttachmentOption((option) =>
154 | option.setName("file").setDescription("The file to be added to the database.")
155 | .setRequired(true)
156 | );
157 | commands.push(command12.toJSON());
158 |
159 | const command13 = new SlashCommandBuilder();
160 | command13.setName("create-image-bingchat");
161 | command13.setDescription(
162 | "Create an image using DALL-E 3! (Powered by Bing Chat Image Maker)",
163 | );
164 | command13.addStringOption((option) =>
165 | option.setName("prompt").setDescription("Prompt to be sent to DALL-E 3")
166 | .setRequired(true)
167 | );
168 | commands.push(command13.toJSON());
169 |
170 | const command14 = new SlashCommandBuilder();
171 | command14.setName("oops");
172 | command14.setDescription(
173 | "Bot crashed while sending a message? Use this to fix it.",
174 | );
175 | commands.push(command14.toJSON());
176 |
177 | const command15 = new SlashCommandBuilder();
178 | command15.setName("channel");
179 | command15.setDefaultMemberPermissions(PermissionFlagsBits.ManageChannels);
180 | command15.setDescription(
181 | "See what channels are set for usage on the bot in your server.",
182 | );
183 | command15.addSubcommand((subcommand) =>
184 | subcommand
185 | .setName("add")
186 | .setDescription("Designate a channel as one the bot should respond in.")
187 | .addChannelOption((option) =>
188 | option.setName("channel")
189 | .setRequired(true)
190 | .setDescription("The channel to add the bot to")
191 | )
192 | );
193 | command15.addSubcommand((subcommand) =>
194 | subcommand
195 | .setName("remove")
196 | .setDescription("Remove a channel the bot should respond in.")
197 | .addChannelOption((option) =>
198 | option.setName("channel")
199 | .setRequired(true)
200 | .setDescription("The channel to remove the bot from")
201 | )
202 | );
203 | commands.push(command15.toJSON());
204 |
205 | const command16 = new SlashCommandBuilder();
206 | command16.setName("send");
207 | command16.setDescription(
208 | "Send a message to the bot.",
209 | );
210 | command16.addStringOption((option) =>
211 | option.setName("prompt").setDescription("Prompt to be sent to your chosen LLM")
212 | .setRequired(true)
213 | );
214 | commands.push(command16.toJSON());
215 |
216 | commands.forEach((obj) => {
217 |   obj.integration_types = [0, 1]; // 0 = guild install, 1 = user install.
218 | });
219 |
220 | const appid: string = Deno.env.get("APP_ID")!;
221 | const token: string = Deno.env.get("DISCORD_TOKEN")!;
222 |
223 | const rest = new REST({ version: "10" }).setToken(token);
224 |
225 | // Send slash commands to Discord, create event handler.
226 | try {
227 | console.log("Started refreshing application (/) commands.");
228 |
229 | await rest.put(Routes.applicationCommands(appid), {
230 | body: commands,
231 | });
232 |
233 | console.log("Successfully reloaded application (/) commands.");
234 |
235 | console.log("Loaded slash commands successfully!");
236 | } catch (error) {
237 | console.error(error);
238 | }
239 |
240 | client.on("interactionCreate", async (interaction) => {
241 | if (interaction.isStringSelectMenu()) {
242 | if (interaction.customId === "set-ai") {
243 | const llm = interaction.values[0];
244 |
245 | await db.set(["users", interaction.user.id, "current_bot"], llm);
246 |
247 | await interaction.reply({
248 | content: `Set your LLM to \`${llm}\`!`,
249 | ephemeral: true,
250 | });
251 | }
252 | }
253 | if (!interaction.isChatInputCommand()) return;
254 | if (interaction.commandName === "info") {
255 |     const colorstr: string = "#" +
256 |       Math.floor(Math.random() * 16777215).toString(16).padStart(6, "0"); // Pad so small values still form a valid hex color.
257 |
258 | const color = colorstr as ColorResolvable;
259 |
260 | const embed = new EmbedBuilder()
261 | .setTitle("About this bot!")
262 | .setDescription("Apologies, but /info wasn't written yet.")
263 | .setTimestamp()
264 | .setColor(color);
265 |
266 | await interaction.reply({ embeds: [embed], ephemeral: true });
267 | } else if (interaction.commandName === "wipe") {
268 | const llm =
269 | (await db.get(["users", interaction.user.id, "current_bot"]))
270 | .value; // After reading the typedocs I realized this is the cleaner way to do this
271 | const curconv = (await db.get([
272 | "users",
273 | interaction.user.id,
274 | "current_conversation",
275 | ])).value;
276 |
277 | if (llm === null || curconv === null) {
278 | await interaction.reply({
279 | content: "Send a message before wiping your conversation!",
280 | ephemeral: true,
281 | });
282 | return;
283 | }
284 |
285 | const messages = (await db.get([
286 | "users",
287 | interaction.user.id,
288 | "conversations",
289 | llm,
290 | ])).value;
291 |
292 | if (messages === null) {
293 | await interaction.reply({
294 | content: "Send a message before wiping your conversation!",
295 | ephemeral: true,
296 | });
297 | return;
298 | }
299 |
300 | messages[curconv] = {
301 | id: "New Conversation",
302 | messages: [],
303 | };
304 |
305 | await db.set(
306 | ["users", interaction.user.id, "conversations", llm],
307 | messages,
308 | );
309 |
310 | await interaction.reply({
311 | content: `Your conversation with "${llm}" was reset!`,
312 | ephemeral: true,
313 | });
314 | } else if (interaction.commandName === "add-channel") {
315 | await interaction.reply({
316 | content: "Command not implemented",
317 | ephemeral: true,
318 | });
319 | } else if (interaction.commandName === "remove-channel") {
320 | await interaction.reply({
321 | content: "Command not implemented",
322 | ephemeral: true,
323 | });
324 | } else if (interaction.commandName === "set-ai") {
325 | const options = [];
326 |
327 | for (const key in availableLLMs) {
328 | const llm = availableLLMs[key];
329 |
330 | options.push({
331 | label: llm.information.name,
332 | value: llm.information.id,
333 | description: llm.information.description,
334 | });
335 | }
336 |
337 |     if (options.length === 0) {
338 |       interaction.reply({
339 |         content: "No available LLMs! Have the bot host check the logs.",
340 |         ephemeral: true,
341 |       });
342 |       return; // Bail out; building an empty select menu below would throw.
343 |     }
344 | const select = new StringSelectMenuBuilder().setCustomId("set-ai")
345 | .setPlaceholder("Select an AI").addOptions(options);
346 |
347 |     const row = new ActionRowBuilder<StringSelectMenuBuilder>().addComponents(
348 |       select,
349 |     );
350 |
351 | interaction.reply({
352 | content: "Select an AI to use!",
353 | components: [row],
354 | ephemeral: true,
355 | });
356 | } else if (interaction.commandName === "add-document") {
357 | const attachment = interaction.options.getAttachment("file")
358 | ? interaction.options.getAttachment("file")!
359 | : { contentType: "null", url: "" };
360 |
361 | let attachmentName = interaction.options.getString("file-name");
362 |
363 | if (attachmentName === null) {
364 | attachmentName = "";
365 | }
366 |
367 | console.log(attachment);
368 |
369 | if (attachment.contentType === "text/plain; charset=utf-8") {
370 | await interaction.deferReply({ ephemeral: true });
371 |
372 | const fstatement = await fetch(attachment.url);
373 |
374 | const content = await fstatement.text();
375 |
376 | console.log(content);
377 |
378 | try {
379 | // await addDocument(content, attachmentName);
380 |
381 | // interaction.editReply({ content: "The document has been uploaded and is now in the bot's information database" });
382 |
383 | interaction.editReply({ content: "Vector database disabled!" });
384 | } catch (_err) {
385 | interaction.editReply({
386 | content:
387 | "Something went wrong adding the document! The database may be disabled, please check the logs.",
388 | });
389 | }
390 | } else {
391 | console.log(
392 | "Invalid document given, document was of type",
393 | attachment.contentType,
394 | );
395 | interaction.reply({
396 | content:
397 |           `The given document is not a text file! Please send a .txt file to be uploaded. All we know is that you gave us this type of file: "${attachment.contentType}"`,
398 | ephemeral: true,
399 | });
400 | }
401 | } else if (interaction.commandName === "create-image-bingchat") {
402 | await interaction.deferReply({ ephemeral: true });
403 |
404 | const prompt = interaction.options.getString("prompt");
405 |
406 | const id = crypto.randomUUID();
407 |
408 | try {
409 | const imageCreator = new BingImageCreator({
410 | userToken: Deno.env.get("BING_COOKIE"),
411 | });
412 |
413 | const imageData = await imageCreator.genImageList(prompt, id, true);
414 |
415 | let resp = `Here's your image${
416 | imageData.length === 1 ? "!" : "s!"
417 | } The prompt you gave me was "${prompt}":\n`;
418 |
419 | imageData.forEach((url: string) => {
420 | resp = resp.concat(`${url}\n`);
421 | });
422 |
423 | interaction.editReply(resp);
424 | } catch (err) {
425 | interaction.editReply(
426 | `Something went wrong making the images! All I know is the error was "${err}".`,
427 | );
428 | }
429 | } else if (interaction.commandName === "oops") {
430 | const llm =
431 | (await db.get(["users", interaction.user.id, "current_bot"]))
432 | .value; // After reading the typedocs I realized this is the cleaner way to do this
433 | const curconv = (await db.get([
434 | "users",
435 | interaction.user.id,
436 | "current_conversation",
437 | ])).value;
438 |
439 | if (llm === null || curconv === null) {
440 | await interaction.reply({
441 |         content: "You haven't sent a message yet, so there's nothing for /oops to fix!",
442 | ephemeral: true,
443 | });
444 | return;
445 | }
446 |
447 | if (
448 | (await db.get([
449 | "users",
450 | interaction.user.id,
451 | "messageWaiting",
452 |       ])).value !== true
453 | ) {
454 | await interaction.reply({
455 | content:
456 | "You haven't sent a message yet or there's no message pending. I don't know what you want me to do here.",
457 | ephemeral: true,
458 | });
459 | return;
460 | }
461 |
462 | await db.set(
463 | ["users", interaction.user.id, "messageWaiting"],
464 | false,
465 | );
466 |
467 | await interaction.reply({
468 | content:
469 | `You should be able to send messages now. "${llm}" no longer thinks you're in a conversation.`,
470 | ephemeral: true,
471 | });
472 | } else if (interaction.commandName === "channel") {
473 | const subcmd = interaction.options.getSubcommand();
474 |
475 | const channel = interaction.options.getChannel("channel");
476 |
477 | const gmember = await interaction.guild?.members.fetch(interaction.user);
478 |
479 | if (
480 | !channel?.id || !interaction.guild?.channels.cache.has(channel.id) ||
481 | !gmember?.permissions.has(PermissionFlagsBits.ManageChannels)
482 | ) {
483 | await interaction.reply({
484 |         content: "That channel doesn't exist in this server, or you don't have the Manage Channels permission.",
485 | ephemeral: true,
486 | });
487 | return;
488 | }
489 |
490 | if (subcmd === "add") {
491 | await db.set(
492 | ["channels", channel?.id],
493 | true,
494 | );
495 |
496 | await interaction.reply({
497 | content: `Channel ${channel} added!`,
498 | ephemeral: true,
499 | });
500 | } else if (subcmd === "remove") {
501 | await db.set(
502 | ["channels", channel?.id],
503 | false,
504 | );
505 |
506 | await interaction.reply({
507 | content: `Channel ${channel} removed!`,
508 | ephemeral: true,
509 | });
510 | }
511 | } else if (interaction.commandName === "send") {
512 | const llm =
513 | (await db.get(["users", interaction.user.id, "current_bot"])).value; // After reading the typedocs I realized this is the cleaner way to do this
514 |
515 | if (llm === null) {
516 | await interaction.reply({
517 |         content: "Looks like this is your first time using this bot! Run /info to learn how to get the most out of it, and set your desired LLM with /set-ai!",
518 |         ephemeral: true,
519 | });
520 | return;
521 | } else if (
522 | !Object.prototype.hasOwnProperty.call(availableLLMs, llm)
523 | ) {
524 | // current LLM is removed/corrupted
525 | await interaction.reply({
526 |         content: "Your current LLM is corrupted or was removed! Set a new one with /set-ai!",
527 |         ephemeral: true,
528 | });
529 | return;
530 | }
531 |
532 | if (
533 | availableLLMs[llm].information.highCostLLM &&
534 | Deno.env.get("PREMIUM_ENFORCEMENT") === "true"
535 | ) {
536 | const guild = client.guilds.resolve(Deno.env.get("PRIMARY_GUILD") || "0");
537 | if (guild) {
538 | const member = await guild?.members.fetch(interaction.user.id);
539 | if (
540 | !member.premiumSince &&
541 | Deno.env.get("PRIMARY_GUILD") !== interaction.guild?.id
542 | ) {
543 | interaction.reply({
544 | content: "This LLM is for premium users only! Boost the server to gain access to this LLM, or join the bot host's primary server!",
545 |             ephemeral: true,
546 | });
547 | return;
548 | }
549 | } else {
550 | interaction.reply({
551 | content: "your developer is terrible at his job (Premium lock config not set properly! This LLM is marked as high-cost, have the owner of the bot finish setup.)",
552 |           ephemeral: true,
553 | });
554 | return;
555 | }
556 | }
557 |
558 | let isMessageProcessing = (await db.get([
559 | "users",
560 | interaction.user.id,
561 | "messageWaiting",
562 | ])).value;
563 |
564 |     if (isMessageProcessing) {
565 |       await interaction.reply({
566 |         content: "A message is already being processed!",
567 |         ephemeral: true,
568 |       });
569 |       return;
570 | } else {
571 | isMessageProcessing = true;
572 |
573 | await db.set(
574 | ["users", interaction.user.id, "messageWaiting"],
575 | isMessageProcessing,
576 | );
577 | }
578 |
579 | let curconv = (await db.get([
580 | "users",
581 | interaction.user.id,
582 | "current_conversation",
583 | ])).value;
584 |
585 | if (curconv === null) {
586 | // They haven't used this LLM before
587 | curconv = 0;
588 | await db.set(
589 | ["users", interaction.user.id, "current_conversation"],
590 | curconv,
591 | );
592 | }
593 |
594 | let messages = (await db.get([
595 | "users",
596 | interaction.user.id,
597 | "conversations",
598 | llm,
599 |     ])).value; // May legitimately be null; handled just below.
600 |
601 | if (messages === null) {
602 | // No conversations for this LLM.
603 | messages = [{
604 | id: "New Conversation",
605 | messages: [],
606 | }];
607 | }
608 |
609 | const curmsgs = messages[curconv].messages;
610 |
611 |     const msg = await interaction.deferReply({ ephemeral: true });
612 |
613 | const requirements = availableLLMs[llm].information;
614 |
615 | const reqobject: types.Requirements = {};
616 |
617 | /*if (requirements.multiModal) {
618 | const images: string[] = await getImagesFromMessage(message);
619 |
620 | reqobject.images = images;
621 | }*/
622 |
623 | if (requirements.env) {
624 | reqobject.env = {};
625 |
626 | requirements.env.forEach((envValue) => {
627 | if (!Deno.env.get(envValue)) {
628 |           throw new Error(`Required env value "${envValue}" not found, add it to .env!`);
629 | }
630 |
631 | reqobject.env![envValue] = Deno.env.get(envValue)!;
632 | });
633 |     }
634 |
635 |     reqobject.streaming = false; // No. (Set outside the env check so it always applies.)
636 | try {
637 | const resp = await availableLLMs[llm].send(
638 | interaction.options.getString("prompt")!,
639 | curmsgs,
640 | null,
641 | reqobject,
642 | );
643 |
644 | messages[curconv].messages = resp.messages;
645 |
646 | await db.set(
647 | ["users", interaction.user.id, "conversations", llm],
648 | messages,
649 | );
650 |
651 | const messagechunks = splitStringIntoChunks(
652 | resp.choices[0].message.content!,
653 | 2000,
654 | );
655 |
656 | let cvalue = 0;
657 |
658 |       messagechunks.forEach((chunk) => {
659 |         if (cvalue === 0) {
660 |           cvalue = 1;
661 |           isMessageProcessing = false;
662 |
663 |           db.set(["users", interaction.user.id, "messageWaiting"], isMessageProcessing);
664 |           msg.edit(chunk);
665 |         } else {
666 |           // Send any remaining chunks as ephemeral follow-ups so long replies aren't cut off.
667 |           interaction.followUp({ content: chunk, ephemeral: true });
668 |         }
669 |       });
670 | } catch (err) {
671 | isMessageProcessing = false;
672 |
673 | db.set(
674 | ["users", interaction.user.id, "messageWaiting"],
675 | isMessageProcessing,
676 | );
677 | msg.edit(
678 | "Something went catastrophically wrong! Please tell the bot host to check the logs, thaaaaanks",
679 | );
680 | console.error(
681 |         `Model ${llm} threw an error during /send! Go check that, thanks:`,
682 | err,
683 | );
684 | return;
685 | }
686 | }
687 | });
688 |
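689 | /*
690 |  * Sanity sketch for splitStringIntoChunks (traced by hand, not executed here):
691 |  *
692 |  *   splitStringIntoChunks("a\nb", 3)   -> ["a\nb"]       (fits in one chunk)
693 |  *   splitStringIntoChunks("aa\nbb", 3) -> ["aa", "bb"]   (split at the newline)
694 |  *
695 |  * Caveat: a single line longer than chunkSize is pushed as-is, so one unbroken
696 |  * 2000+-character line can still trip Discord's message length limit.
697 |  */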
--------------------------------------------------------------------------------
/vdb.ts:
--------------------------------------------------------------------------------
1 | import { SupabaseVectorStore } from "npm:langchain/vectorstores/supabase";
2 | import { OpenAIEmbeddings } from "npm:langchain/embeddings/openai";
3 | import {
4 | createClient,
5 | SupabaseClient,
6 | } from "https://esm.sh/@supabase/supabase-js@2.26.0";
7 | import { Document } from "npm:langchain/document";
8 |
9 | import { config } from "npm:dotenv";
10 | config();
11 |
12 | let dbEnabled = true;
13 |
14 | const supabaseKey = Deno.env.get("SUPABASE_SERVICE_ROLE_KEY");
15 | const url = Deno.env.get("SUPABASE_URL");
16 | const apiKey = Deno.env.get("OPENAI_API_KEY");
17 |
18 | let client: SupabaseClient;
19 | let vectorStore: SupabaseVectorStore;
20 |
21 | if (typeof supabaseKey !== "string") {
22 | console.log(
23 | `SUPABASE_SERVICE_ROLE_KEY is not defined in your .env, the database will be disabled.`,
24 | );
25 | dbEnabled = false;
26 | } else if (typeof url !== "string") {
27 | console.log(
28 | `SUPABASE_URL is not defined in your .env, the database will be disabled.`,
29 | );
30 | dbEnabled = false;
31 | } else {
32 | try {
33 | client = createClient(url, supabaseKey, {
34 | auth: { persistSession: false },
35 | });
36 |
37 | vectorStore = await SupabaseVectorStore.fromExistingIndex(
38 | new OpenAIEmbeddings({
39 | openAIApiKey: apiKey,
40 | }),
41 | {
42 | client,
43 | tableName: "documents",
44 | queryName: "match_documents",
45 | },
46 | );
47 | } catch (_err) {
48 | console.warn(
49 | "Something went wrong starting the database, are you sure the API key and Supabase URL are right? The database has been disabled.",
50 | );
51 | dbEnabled = false;
52 | }
53 | }
54 |
55 | export const addDocument = async (
56 | documentContent: string,
57 | documentName: string,
58 | ) => {
59 | if (!dbEnabled) {
60 | throw "Database disabled";
61 | }
62 |
63 | const docsarr = [];
64 |
65 | const document = new Document({
66 | pageContent: documentContent,
67 | metadata: { name: documentName },
68 | });
69 |
70 | docsarr.push(document);
71 |
72 | const res = await vectorStore.addDocuments(docsarr);
73 |
74 | console.log(res);
75 |
76 | return res;
77 | };
78 |
79 | export const getRelevantDocument = async (query: string) => {
80 | try {
81 | if (!dbEnabled) {
82 | return "Database disabled";
83 | }
84 |
85 | let result: Document[] | string = await vectorStore.similaritySearch(
86 | query,
87 | 1,
88 | );
89 |
90 |     if (result.length === 0) {
91 | result = "No result found";
92 | } else {
93 | result = result[0].pageContent;
94 | }
95 |
96 | console.log(result);
97 |
98 | return result;
99 | } catch (_err) {
100 | return "Something went wrong trying to get information from the database!";
101 | }
102 | };
103 |
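104 | /*
105 |  * Usage sketch (assumes the Supabase env vars above are set, and that the
106 |  * "documents" table and "match_documents" RPC from the LangChain Supabase
107 |  * vector-store setup exist; both names come from the config above):
108 |  *
109 |  *   await addDocument("Deno KV is the bot's local store.", "storage-notes");
110 |  *   const hit = await getRelevantDocument("What storage does the bot use?");
111 |  *   // hit is the best match's pageContent, or "No result found".
112 |  */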
--------------------------------------------------------------------------------