├── .env.template
├── .github
└── workflows
│ └── npm-publish.yml
├── .gitignore
├── .npmignore
├── .prettierrc
├── README.md
├── hooks
└── pre-commit
├── package.json
├── setup.sh
├── src
├── agent.ts
├── decorators.ts
├── helpers.ts
├── index.ts
├── providers
│ ├── anthropic.ts
│ ├── google.ts
│ ├── groq.ts
│ ├── index.ts
│ └── openai.ts
└── types
│ ├── agent.ts
│ ├── index.ts
│ ├── messages.ts
│ ├── providers.ts
│ └── utilities
│ ├── hooks.ts
│ ├── index.ts
│ ├── jobs.ts
│ ├── middleware.ts
│ └── tools.ts
├── tsconfig.json
└── tsup.config.ts
/.env.template:
--------------------------------------------------------------------------------
1 | # Anthropic
2 | ANTHROPIC_API_KEY=sk-ant-XXXXXXXXXXXXXXXXXX
3 |
4 | # OpenAI
5 | OPENAI_API_KEY=sk-XXXXXXXXXXXXXXXXXX
6 |
--------------------------------------------------------------------------------
/.github/workflows/npm-publish.yml:
--------------------------------------------------------------------------------
# This workflow will run tests using node and then publish a package to GitHub Packages when a release is created
# For more information see: https://docs.github.com/en/actions/publishing-packages/publishing-nodejs-packages

name: Deploy Magma to NPM

on:
    push:
        branches:
            - main
            - release/*

jobs:
    publish-npm:
        runs-on: ubuntu-latest
        steps:
            - uses: actions/checkout@v4
            - uses: actions/setup-node@v4
              with:
                  node-version: 20
                  registry-url: https://registry.npmjs.org/
            # NOTE: package-lock.json is gitignored, so 'npm ci' cannot be used here.
            - run: npm install
            - run: npm run build --if-present

            # Get branch name
            - name: Get branch name
              id: branch-name
              run: |
                  BRANCH=${GITHUB_REF#refs/heads/}
                  echo "branch=$BRANCH" >> $GITHUB_OUTPUT

            # For release/* branches, update version and publish with tag
            # (e.g. release/foo -> 1.6.0-release-foo.<timestamp> published under dist-tag release-foo)
            - name: Update version for release branches
              if: startsWith(github.ref, 'refs/heads/release/')
              run: |
                  BRANCH="${{ steps.branch-name.outputs.branch }}"
                  BRANCH_SLUG=$(echo $BRANCH | sed 's/\//-/g')
                  CURRENT_VERSION=$(node -p "require('./package.json').version")
                  NEW_VERSION="${CURRENT_VERSION}-${BRANCH_SLUG}.$(date +'%Y%m%d%H%M%S')"
                  npm version $NEW_VERSION --no-git-tag-version
                  echo "Publishing version: $NEW_VERSION with tag ${BRANCH_SLUG}"
                  npm publish --tag ${BRANCH_SLUG}
              env:
                  NODE_AUTH_TOKEN: ${{secrets.NPM_SECRET}}

            # For main branch, publish normally
            - name: Publish main branch
              if: github.ref == 'refs/heads/main'
              run: npm publish
              env:
                  NODE_AUTH_TOKEN: ${{secrets.NPM_SECRET}}

            - name: Successful Deployment Notification
              if: success() && !contains(github.event.head_commit.message, '[no-notify]')
              env:
                  # Pass untrusted values (especially the commit message) through the
                  # environment instead of splicing them into the script: a commit
                  # message containing quotes/braces would previously break the JSON
                  # payload and could inject shell/JSON content.
                  COMMIT_SHA: ${{ github.sha }}
                  COMMIT_AUTHOR: ${{ github.actor }}
                  COMMIT_MESSAGE: ${{ github.event.head_commit.message }}
                  SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
              run: |
                  # Build the Slack payload with jq so all values are properly JSON-escaped.
                  PAYLOAD=$(jq -n \
                    --arg sha "$COMMIT_SHA" \
                    --arg author "$COMMIT_AUTHOR" \
                    --arg message "$COMMIT_MESSAGE" \
                    '{
                      blocks: [
                        {
                          type: "header",
                          text: { type: "plain_text", text: "🚀 Successful Deployment of Magma Framework", emoji: true }
                        },
                        {
                          type: "section",
                          fields: [
                            { type: "mrkdwn", text: ("*Commit:*\n`" + $sha + "`") },
                            { type: "mrkdwn", text: ("*Author:*\n" + $author) }
                          ]
                        },
                        {
                          type: "section",
                          fields: [
                            { type: "mrkdwn", text: ("*Changes:*\n" + $message) }
                          ]
                        }
                      ]
                    }')
                  curl -X POST -H 'Content-type: application/json' --data "$PAYLOAD" "$SLACK_WEBHOOK_URL"
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .env
2 | .vscode/
3 | dist/
4 | node_modules/
5 | package-lock.json
6 | Icon*
7 |
--------------------------------------------------------------------------------
/.npmignore:
--------------------------------------------------------------------------------
1 | .env*
2 | .vscode/
3 | .eslint*
4 | .prettier*
5 | node_modules/
6 | demos/
7 | docs/
8 | src/**/*.ts
9 | index.ts
10 | tsconfig.json
11 | package-lock.json
12 | *.sh
13 | hooks/
14 |
--------------------------------------------------------------------------------
/.prettierrc:
--------------------------------------------------------------------------------
1 | {
2 | "trailingComma": "es5",
3 | "tabWidth": 4,
4 | "semi": true,
5 | "singleQuote": true,
6 | "printWidth": 100
7 | }
8 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |

3 |
4 |
5 |
6 |
7 | Turn your workflows into a workforce.
8 | Create and deploy conversational agents without any of the headaches.
9 |
10 |
11 |
12 |
13 |
[NPM Package](https://www.npmjs.com/package/@pompeii-labs/magma)
[Slack Community](https://join.slack.com/t/magmacommunity/shared_invite/zt-2tghhq3av-Xn9k9ntwN5ZwqvxbWcfsTg)
[GitHub](https://github.com/pompeii-labs/magma)
17 |
18 |
19 |
20 | ## 🌋 What is Magma?
21 |
22 | Magma is a framework that lets you create AI agents without the headache. No complex chains, no confusing abstractions - just write the logic you want your agent to have.
23 |
24 | Want to try it out? [Chat with Dialog](https://chat.productdialog.com/ac94ab36-c5bb-4b54-a195-2b6b2499dcff), our user research agent built with Magma!
25 |
26 | ## ⚡️ Quick Start
27 |
28 | 1. Install Magma:
29 | ```bash
30 | npm i @pompeii-labs/magma
31 | ```
32 |
33 | 2. Create your first agent:
34 | ```ts
35 | import { MagmaAgent } from "@pompeii-labs/magma";
36 |
37 | // Magma Agents are class based, so you can extend them with your own methods
38 | class MyAgent extends MagmaAgent {
39 |
40 | // Want to give it some personality? Add system prompts:
41 | getSystemPrompts() {
42 | return [{
43 | role: "system",
44 | content: "You are a friendly assistant who loves dad jokes"
45 | }];
46 | }
47 | }
48 |
49 | // That's it! You've got a working agent
50 | const myAgent = new MyAgent();
51 |
52 | // Run it:
53 | const reply = await myAgent.main();
54 | console.log(reply.content);
55 | ```
56 |
57 | ## 🔥 Key Features
58 |
59 | - **Simple**: Build agents in minutes with minimal code
60 | - **Flexible**: Use any AI provider (OpenAI, Anthropic, Groq)
61 | - **Hosted**: Deploy your agents in seconds with the [MagmaDeploy platform](https://magmadeploy.com)
62 | - **Powerful**: Add tools and middleware when you need them
63 | - **Observable**: See exactly what your agent is doing
64 |
65 | ## 🛠 Examples
66 |
67 | ### Add Tools
Tools give your agent the ability to perform actions. Any method decorated with `@tool` (with its parameters declared via `@toolparam`) will be available for the agent to use.
69 |
70 | **Important Notes**:
71 | - Every tool method must return a string
72 | - Every tool has `call` as a required parameter, which is the `MagmaToolCall` object
73 | - Tools are executed in sequence
74 | ```ts
75 | import { MagmaAgent } from "@pompeii-labs/magma";
76 | import { tool, toolparam } from "@pompeii-labs/magma/decorators";
77 |
78 | /** Decorate any agent class method with @toolparam or @tool.
79 | * @tool is used to define the tool itself
80 | * @toolparam is used to define the parameters of the tool (key, type, description, required)
81 | */
82 | class MyAgent extends MagmaAgent {
83 | @tool({ name: "search_database", description: "Search the database for records" })
84 | @toolparam({
85 | key: "query",
86 | type: "string",
87 | description: "Search query",
88 | required: true
89 | })
90 | @toolparam({
91 | key: "filters",
92 | type: "object",
93 | properties: [
94 | { key: "date", type: "string" },
95 | { key: "category", type: "string", enum: ["A", "B", "C"] }
96 | ]
97 | })
98 | async searchDatabase(call: MagmaToolCall) {
99 | const { query, filters } = call.fn_args;
100 |
101 | const results = await this.searchDatabase(query, filters);
102 |
103 | return "Here are the results of your search: " + JSON.stringify(results);
104 | }
105 | }
106 | ```
107 |
108 | ### Add Middleware
109 | Middleware is a novel concept to Magma. It allows you to add custom logic to your agent before or after a tool is executed.
110 |
111 | This is a great way to add custom logging, validation, data sanitization, etc.
112 |
113 | **Types**:
114 | - "preCompletion": Runs before the LLM call is made, takes in a MagmaUserMessage
115 | - "onCompletion": Runs after the agent generates a text response, takes in a MagmaAssistantMessage
116 | - "preToolExecution": Runs before a tool is executed, takes in a MagmaToolCall
117 | - "onToolExecution": Runs after a tool is executed, takes in a MagmaToolResult
118 |
119 | **Important Notes**:
120 | - You can have unlimited middleware methods
121 | - Middleware methods can manipulate the message they take in
122 | - Middleware methods can throw errors to adjust the flow of the agent
123 |
124 | **Error Handling**:
125 | - If preCompletion middleware throws an error, the error message is supplied as if it were the assistant message. The user and assistant messages are also removed from the conversation history
126 | - If onCompletion middleware throws an error, the error message is supplied to the LLM, and it tries to regenerate a response. The assistant message is not added to the conversation history
127 | - If preToolExecution middleware throws an error, the error message is supplied as if it were the response from the tool
128 | - If onToolExecution middleware throws an error, the error message is supplied as if it were the response from the tool
129 | ```ts
130 | import { MagmaAgent } from "@pompeii-labs/magma";
131 | import { middleware } from "@pompeii-labs/magma/decorators";
132 |
133 | /**
134 | * Decorate any agent class method with @middleware to add custom logging, validation, etc.
135 | * Types: "preCompletion", "onCompletion", "preToolExecution", "onToolExecution"
136 | */
137 | class MyAgent extends MagmaAgent {
138 |
139 | @middleware("onCompletion")
140 | async logBeforeCompletion(message) {
141 | if (message.content.includes("bad word")) {
142 | throw new Error("You just used a bad word, please try again.");
143 | }
144 | }
145 | }
146 | ```
147 |
148 | ### Schedule Jobs
149 | Jobs allow you to schedule functions within your agent. Jobs conform to the standard UNIX cron syntax (https://crontab.guru/).
150 |
151 | **Important Notes**:
152 | - Jobs should be static methods, so they can run without instantiating the agent.
153 | - Jobs do not take in any parameters, and they do not return anything.
154 | ```ts
155 | import { MagmaAgent } from "@pompeii-labs/magma";
156 | import { job } from "@pompeii-labs/magma/decorators";
157 |
158 | class MyAgent extends MagmaAgent {
159 | // Run every day at midnight
160 | @job("0 0 * * *")
161 | static async dailyCleanup() {
162 | await this.cleanDatabase();
163 | }
164 |
165 | // Run every hour with timezone
166 | @job("0 * * * *", { timezone: "America/New_York" })
167 | static async hourlySync() {
168 | await this.syncData();
169 | }
170 | }
171 | ```
172 |
173 | ### Expose Hooks
174 | Hooks allow you to expose your agent as an API. Any method decorated with @hook will be exposed as an endpoint.
175 |
176 |
177 | **Important Notes**:
178 | - Hooks are static methods, so they can run without instantiating the agent.
179 | - Hooks are exposed at `/hooks/{hook_name}` in the Magma API
180 | - The only parameter to hook functions is the request object, which is an instance of `express.Request`
181 | ```ts
182 | import { MagmaAgent } from "@pompeii-labs/magma";
183 | import { hook } from "@pompeii-labs/magma/decorators";
184 | import { Request } from "express";
185 |
186 | class MyAgent extends MagmaAgent {
187 |
188 | @hook('notification')
189 | static async handleNotification(req: Request) {
190 | await this.processNotification(req.body);
191 | }
192 | }
193 | ```
194 |
195 | ### Use Different Providers
196 | You can use any supported provider by setting the providerConfig.
197 |
198 | **Important Notes**:
199 | - You can set the providerConfig in the constructor, or by calling `setProviderConfig`
200 | - You do not need to adjust any of your tools, middleware, jobs, or hooks to use a different provider. Magma will handle the rest.
201 | ```ts
202 | class Agent extends MagmaAgent {
203 | constructor() {
204 | // Use OpenAI (default)
205 | super({
206 | providerConfig: {
207 | provider: "openai",
208 | model: "gpt-4o"
209 | }
210 | });
211 |
212 | // Use Anthropic
213 | this.setProviderConfig({
214 | provider: "anthropic",
215 | model: "claude-3.5-sonnet-20240620"
216 | });
217 |
218 | // Use Groq
219 | this.setProviderConfig({
220 | provider: "groq",
221 | model: "llama-3.1-70b-versatile"
222 | });
223 | }
224 | }
225 | ```
226 |
227 | ### State Management
228 | Every Tool, Middleware, Hook, and Job is passed the instance of the agent. This allows you to manipulate agent state and call agent functions in Utility classes
229 |
230 | ```ts
231 | class MyAgent extends MagmaAgent {
232 | // Using a field to store data
233 | myQuery: string;
234 | counter: number;
235 |
236 | async setup() {
237 | this.myQuery = "Hello, World!";
238 | this.counter = 0;
239 | }
240 |
241 | @tool({ description: "Increment the counter" })
242 | async increment() {
243 | this.counter++;
244 | return `Counter is now ${this.counter}`;
245 | }
246 |
247 | @tool({ name: "api_call" })
248 | async apiCall() {
249 | const response = await fetch("https://myapi.com/data", {
250 | body: JSON.stringify({
251 | query: this.myQuery
252 | })
253 | });
254 |
255 | return JSON.stringify(response.json());
256 | }
257 | }
258 | ```
259 |
260 | ### Core Methods
261 | ```ts
262 | import { MagmaAgent } from "@pompeii-labs/magma";
263 |
264 | class MyAgent extends MagmaAgent {
265 | // Initialize your agent
266 | async setup() {
267 | // Load resources, connect to databases, etc.
268 | await this.loadDatabase();
269 | return "I'm ready to help!";
270 | }
271 |
272 | // Handle incoming messages
273 | async receive(message: any) {
274 | // Process user input before main() is called
275 | if (message.type === 'image') {
276 | await this.processImage(message.content);
277 | }
278 | }
279 |
280 | // Clean up resources
281 | async cleanup();
282 |
283 | // Manually trigger a specific tool
284 | async trigger({ name: "get_weather" });
285 |
286 | // Stop the current execution
287 | kill();
288 | }
289 | ```
290 |
291 | ### Event Handlers
292 | Event handlers are optional methods that allow you to tack on custom logic to various events in the agent lifecycle.
293 | ```ts
294 | import { MagmaAgent } from "@pompeii-labs/magma";
295 |
296 | class MyAgent extends MagmaAgent {
297 | // Handle agent shutdown
298 | async onCleanup() {
299 | console.log("Agent shutting down...");
300 | }
301 |
302 | // Handle errors
303 | async onError(error: Error) {
304 | console.error("Something went wrong:", error);
305 | await this.notifyAdmin(error);
306 | }
307 |
308 | // Track token usage
309 | async onUsageUpdate(usage: MagmaUsage) {
310 | await this.saveUsageMetrics(usage);
311 | }
312 |
313 | // Process streaming responses
314 | async onStreamChunk(chunk: MagmaStreamChunk) {
315 | console.log("Received chunk:", chunk.content);
316 | }
317 | }
318 | ```
319 |
320 | ## 📚 Want More?
321 |
322 | - Join our [Slack Community](https://join.slack.com/t/magmacommunity/shared_invite/zt-2tghhq3av-Xn9k9ntwN5ZwqvxbWcfsTg)
323 | - Star us on [GitHub](https://github.com/pompeii-labs/magma)
324 |
325 | ## 📝 License
326 |
327 | Magma is [Apache 2.0 licensed](LICENSE).
328 |
--------------------------------------------------------------------------------
/hooks/pre-commit:
--------------------------------------------------------------------------------
#!/bin/sh

# Pre-commit hook: reject commits that introduce forbidden strings, then run
# lint/format against the staged state only.

# Add forbidden strings (space-separated patterns)
FORBIDDEN_STRINGS="magma.ngrok.app"

# Check for forbidden strings in staged files.
# Only inspect ADDED lines (diff lines starting with '+') so that a commit
# which merely *removes* a forbidden string is not rejected.
# NOTE(review): the unquoted for-loop breaks on filenames containing spaces —
# acceptable for this repo, but worth knowing.
for file in $(git diff --cached --name-only); do
    for pattern in $FORBIDDEN_STRINGS; do
        if git diff --cached -- "$file" | grep -q "^+.*$pattern"; then
            echo "Error: Forbidden string '$pattern' found in $file"
            echo "Please remove this string before committing."
            exit 1
        fi
    done
done

# Stash unstaged changes so lint/format sees only the staged state.
# Track whether a stash entry was actually created: 'git stash' is a no-op on
# a clean working tree, and an unconditional 'git stash pop' would then pop an
# unrelated, pre-existing stash entry.
STASH_COUNT_BEFORE=$(git stash list | wc -l)
git stash -q --keep-index
STASH_COUNT_AFTER=$(git stash list | wc -l)

# Store originally staged files
STAGED_FILES=$(git diff --cached --name-only)

# Run lint and format
npm run lint-format

# Store the last exit code
RESULT=$?

# Add back only the files that were originally staged
for file in $STAGED_FILES; do
    git add "$file"
done

# Unstash changes, but only if we actually created a stash above
if [ "$STASH_COUNT_AFTER" -gt "$STASH_COUNT_BEFORE" ]; then
    git stash pop -q
fi

# If the lint-format command failed, exit with an error status
if [ $RESULT -ne 0 ]; then
    echo "Linting or formatting failed. Please fix the issues and try committing again."
    exit 1
fi

# Echo
echo "Linting and formatting check passed"

# Return the exit code
exit 0
48 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "@pompeii-labs/magma",
3 | "version": "1.6.0",
4 | "description": "The Typescript framework to build AI agents quickly and easily",
5 | "keywords": [
6 | "Agents",
7 | "Typescript",
8 | "AI",
9 | "Agentic",
10 | "LLM",
11 | "Workflows",
12 | "Middleware"
13 | ],
14 | "license": "Apache-2.0",
15 | "author": "Pompeii Labs, Inc.",
16 | "files": [
17 | "dist"
18 | ],
19 | "repository": "pompeii-labs/magma",
20 | "main": "dist/index.js",
21 | "types": "dist/index.d.ts",
22 | "exports": {
23 | ".": {
24 | "types": "./dist/index.d.ts",
25 | "default": "./dist/index.js"
26 | },
27 | "./decorators": {
28 | "types": "./dist/decorators.d.ts",
29 | "default": "./dist/decorators.js"
30 | },
31 | "./types": {
32 | "types": "./dist/types/index.d.ts",
33 | "default": "./dist/types/index.js"
34 | }
35 | },
36 | "scripts": {
37 | "format": "prettier --write \"src/**/*.ts\"",
38 | "build": "npm run format ; tsup",
39 | "dev": "tsup --watch"
40 | },
41 | "devDependencies": {
42 | "@swc/core": "^1.5.1",
43 | "@types/node-cron": "^3.0.11",
44 | "@types/ws": "^8.5.12",
45 | "@typescript-eslint/eslint-plugin": "^6.21.0",
46 | "@typescript-eslint/parser": "^6.21.0",
47 | "prettier": "^3.2.5",
48 | "tsup": "^8.0.2",
49 | "typescript": "^5.6.2"
50 | },
51 | "dependencies": {
52 | "dotenv": "^16.4.5",
53 | "node-cron": "^3.0.3"
54 | },
55 | "peerDependencies": {
56 | "@anthropic-ai/sdk": "~0.39.0",
57 | "@google/generative-ai": "~0.24.0",
58 | "@types/express": "~5.0.1",
59 | "groq-sdk": "~0.15.0",
60 | "openai": "~4.86.2",
61 | "ws": "~8.18.0"
62 | }
63 | }
64 |
--------------------------------------------------------------------------------
/setup.sh:
--------------------------------------------------------------------------------
#!/bin/sh

# One-time repo setup: install dependencies and git hooks.
# Abort on the first failing command so a failed npm install does not
# silently continue into hook installation.
set -e

# Hooks are installed into .git/hooks, so this must run from the repo root.
if [ ! -d .git ]; then
    echo "Error: run setup.sh from the repository root" >&2
    exit 1
fi

# Install dependencies
npm install

# Install all hooks
cp hooks/* .git/hooks/
chmod +x .git/hooks/*
9 |
--------------------------------------------------------------------------------
/src/agent.ts:
--------------------------------------------------------------------------------
1 | import {
2 | MagmaAssistantMessage,
3 | MagmaMessage,
4 | MagmaProviderConfig,
5 | MagmaTool,
6 | MagmaMiddleware,
7 | MagmaMiddlewareTriggerType,
8 | MagmaStreamChunk,
9 | MagmaToolResult,
10 | MagmaMiddlewareReturnType,
11 | MagmaUtilities,
12 | MagmaHook,
13 | MagmaJob,
14 | MagmaCompletionConfig,
15 | MagmaToolResultBlock,
16 | MagmaMessageType,
17 | MagmaMiddlewareParamType,
18 | MagmaSystemMessageType,
19 | MagmaSystemMessage,
20 | } from './types';
21 | import { Provider } from './providers';
22 | import { hash, loadHooks, loadJobs, loadMiddleware, loadTools, sanitizeMessages } from './helpers';
23 | import OpenAI from 'openai';
24 | import Anthropic from '@anthropic-ai/sdk';
25 | import Groq from 'groq-sdk';
26 | import cron from 'node-cron';
27 | import { GoogleGenerativeAI } from '@google/generative-ai';
// Upper bound on middleware retry attempts before a run is treated as a
// catastrophic failure (see the `Catastrophic error` throws below).
const kMiddlewareMaxRetries = 5;

// Constructor options: provider configuration plus agent-level runtime flags.
type AgentProps = MagmaProviderConfig & {
    verbose?: boolean; // when true, log() echoes messages to the console
    messageContext?: number; // trailing messages included per completion (default 20)
    stream?: boolean; // request streaming completions from the provider
};
35 |
36 | export class MagmaAgent {
37 | verbose?: boolean;
38 | stream: boolean = false;
39 | private providerConfig: MagmaProviderConfig;
40 | private retryCount: number;
41 | private messages: MagmaMessage[];
42 | private middlewareRetries: Record;
43 | private messageContext: number;
44 | private scheduledJobs: cron.ScheduledTask[];
45 | private abortControllers: Map = new Map();
46 |
47 | constructor(args?: AgentProps) {
48 | this.messageContext = args?.messageContext ?? 20;
49 | this.verbose = args?.verbose ?? false;
50 | this.stream = args?.stream ?? false;
51 |
52 | args ??= {
53 | provider: 'anthropic',
54 | model: 'claude-3-5-sonnet-latest',
55 | };
56 |
57 | const providerConfig = {
58 | provider: args.provider,
59 | model: args.model,
60 | settings: args.settings,
61 | client: args.client,
62 | } as MagmaProviderConfig;
63 |
64 | this.setProviderConfig(providerConfig);
65 |
66 | this.messages = [];
67 | this.retryCount = 0;
68 | this.middlewareRetries = {};
69 |
70 | this.scheduledJobs = [];
71 |
72 | this.log('Agent initialized');
73 | }
74 |
75 | public log(message: string): void {
76 | if (this.verbose) {
77 | console.log(message);
78 | }
79 | }
80 |
81 | public async setup?(opts?: object): Promise {}
82 |
    /**
     * Optional method to receive input from the user
     * @param message message object received from the user - type to be defined by extending class
     */
    // NOTE(review): return type lost its generic argument (likely
    // `Promise<void>`) — restore it, this does not compile as written.
    public async receive?(message: any): Promise {}
88 |
89 | public async cleanup(): Promise {
90 | try {
91 | await this.onCleanup();
92 | } catch (error) {
93 | this.log(`Error during cleanup: ${error.message ?? 'Unknown'}`);
94 | } finally {
95 | this._cleanup();
96 | }
97 | }
98 |
99 | private async _cleanup(): Promise {
100 | this.abortControllers.forEach((controller) => controller.abort());
101 | this.abortControllers.clear();
102 |
103 | this.messages = [];
104 |
105 | this.log('Agent cleanup complete');
106 | }
107 |
    /**
     * Manually trigger a tool call in the context of the conversation
     *
     * @param args.name The name of the tool to run
     * @param args.tool The Magma tool to run
     * Either `name` or `tool` must be provided. Tool will be prioritized if both are provided.
     * @param args.addToConversation Whether the tool call should be added to the conversation history (default: false)
     * @param config Optional provider override for this trigger only; the previous provider config is restored after the completion
     * @param parentRequestIds Request ids that must NOT be aborted by this call (populated when trigger/main recurse into each other)
     * @returns the raw tool result (when not added to the conversation), the final assistant message, or null when the request was aborted
     * @throws if no tool matching tool is found
     */
    // NOTE(review): the return type reads `Promise {` — the generic argument
    // (presumably `Promise<MagmaToolResult | MagmaAssistantMessage | null>`)
    // appears to have been stripped in transit; restore it, this does not
    // compile as written. The `new Promise(` below is missing the same
    // generic. Also note the `new Promise(async (resolve) => ...)` pattern:
    // a rejection inside the async executor is NOT propagated to the outer
    // promise — errors rely on the inner try/catch paths only.
    public async trigger(
        args: {
            name?: string;
            tool?: MagmaTool;
            addToConversation?: boolean;
        },
        config?: MagmaProviderConfig,
        parentRequestIds: string[] = []
    ): Promise {
        const requestId = Math.random().toString(36).substring(2, 15);
        sanitizeMessages(this.messages);
        // Explicit tool takes priority over lookup by name.
        const tool = args.tool ?? this.tools.find((t) => t.name === args.name);

        if (!tool) throw new Error('No tool found to trigger');

        args.addToConversation ??= false;

        try {
            // this promise will resolve when either trigger finishes or the abort controller is aborted
            const triggerPromise = new Promise(
                async (resolve) => {
                    // Abort every other in-flight request except our ancestors,
                    // so only one request chain is active at a time.
                    for (const [key, controller] of this.abortControllers.entries()) {
                        if (!parentRequestIds?.includes(key)) {
                            controller.abort();
                            this.abortControllers.delete(key);
                        }
                    }

                    // Register this request; if someone aborts us, resolve null.
                    const abortController = new AbortController();
                    this.abortControllers.set(requestId, abortController);
                    abortController.signal.onabort = () => {
                        this.abortControllers.delete(requestId);
                        return resolve(null);
                    };

                    // Remember the active provider so a per-call `config`
                    // override can be rolled back after the completion.
                    const startingProviderConfig = this.providerConfig;

                    if (config?.['provider']) {
                        this.setProviderConfig(config);
                    }

                    const provider = Provider.factory(this.providerConfig.provider);

                    const messages = [
                        ...this.getSystemPrompts().map((s) => new MagmaSystemMessage(s)),
                        ...this.getMessages(this.messageContext),
                    ];
                    // Strip dangling tool_call blocks from a trailing assistant
                    // message — we are about to force a different tool call.
                    if (messages.length > 0 && messages.at(-1).role === 'assistant') {
                        messages[messages.length - 1].blocks = messages[
                            messages.length - 1
                        ].blocks.filter((block) => block.type !== 'tool_call');
                    }

                    // Offer ONLY the requested tool and force the model to call it.
                    const completionConfig: MagmaCompletionConfig = {
                        providerConfig: this.providerConfig,
                        messages,
                        tools: [tool],
                        tool_choice: tool.name,
                        stream: this.stream,
                    };

                    // If we were aborted while preparing the request, bail out.
                    if (!this.abortControllers.has(requestId)) {
                        return resolve(null);
                    }

                    const completion = await provider.makeCompletionRequest({
                        config: completionConfig,
                        onStreamChunk: this.onStreamChunk.bind(this),
                        attempt: 0,
                        signal: this.abortControllers.get(requestId)?.signal,
                        agent: this,
                    });

                    // Null completion means the request was aborted mid-flight.
                    if (completion === null) {
                        return resolve(null);
                    }

                    // Roll back any per-call provider override.
                    this.setProviderConfig(startingProviderConfig);

                    this.onUsageUpdate(completion.usage);

                    const call = completion.message;

                    // If the tool call is not `inConversation`, we just return the result
                    if (!args.addToConversation) {
                        const toolResults = await this.executeTools(call, [tool.name]);
                        return resolve(toolResults[0]);
                    }

                    let modifiedMessage: MagmaMessage;
                    try {
                        modifiedMessage = await this.runMiddleware(
                            'onCompletion',
                            completion.message
                        );
                        this.messages.push(modifiedMessage);
                    } catch (error) {
                        // Middleware rejected the completion: drop the assistant
                        // message, surface the error as a system message, and
                        // retry the whole trigger (this request becomes a parent
                        // so the retry does not abort itself).
                        if (this.messages.at(-1).role === 'assistant') {
                            this.messages.pop();
                        }

                        this.addMessage({
                            role: 'system',
                            content: error.message,
                        });

                        return resolve(
                            await this.trigger(args, config, [...parentRequestIds, requestId])
                        );
                    }

                    // runMiddleware returns nothing once retries are exhausted.
                    if (!modifiedMessage) {
                        throw new Error(
                            `Catastrophic error: failed onCompletion middleware ${kMiddlewareMaxRetries} times`
                        );
                    }

                    const toolResults = await this.executeTools(completion.message, [tool.name]);

                    if (toolResults.length > 0) {
                        this.messages.push(
                            new MagmaMessage({
                                role: 'user',
                                blocks: toolResults.map((t) => ({
                                    type: 'tool_result',
                                    tool_result: t,
                                })),
                            })
                        );

                        // Trigger another completion because last message was a tool call
                        return resolve(await this.main(config, [...parentRequestIds, requestId]));
                    }

                    return resolve(modifiedMessage as MagmaAssistantMessage);
                }
            );

            return await triggerPromise;
        } catch (error) {
            // Prefer the user's onError handler; rethrow only if it throws.
            try {
                this.onError(error);
            } catch {
                throw error;
            }
        } finally {
            // Always release this request's abort controller.
            this.abortControllers.delete(requestId);
        }
    }
266 |
267 | /**
268 | * Handles the completion process by interacting with the configured provider and executing middleware.
269 | * This function performs the following steps:
270 | * 1. Retrieves the provider instance based on the configured provider name.
271 | * 2. Executes the 'preCompletion' middleware, which can modify the last message before making the AI request.
272 | * 3. Configures the completion request with necessary parameters such as model, messages, and tools.
273 | * 4. Sends the completion request to the provider and updates usage statistics.
274 | * 5. If the response indicates a 'tool_call', runs the 'preToolExecution' middleware and executes the appropriate tool.
275 | * 6. If not a tool call, runs the 'onCompletion' middleware with the returned message.
276 | *
277 | * If an error occurs during the process, the function will either trigger an error update handler or rethrow the error.
278 | *
279 | * @returns { MagmaMessage } A promise that resolves to a `MagmaMessage` object, which is either the final message
280 | * returned from the provider or the result of a tool execution
281 | *
282 | * @throws Will rethrow the error if no `onError` handler is defined
283 | */
284 | public async main(
285 | config?: MagmaProviderConfig,
286 | parentRequestIds: string[] = []
287 | ): Promise {
288 | const requestId = Math.random().toString(36).substring(2, 15);
289 | sanitizeMessages(this.messages);
290 | // console.log(requestId);
291 | try {
292 | // this promise will resolve when either main finishes or the abort controller is aborted
293 | const mainPromise = new Promise(async (resolve) => {
294 | // if we have an abort controller, abort it
295 | // create a new abort controller for this request
296 | // add an onabort handler to the abort controller that will resolve the promise with null
297 | // if the abort controller is aborted
298 | // if the abort controller is not aborted, the promise will resolve with the result of the main function
299 |
300 | for (const [key, controller] of this.abortControllers.entries()) {
301 | if (!parentRequestIds.includes(key)) {
302 | // console.log('Aborting and removing controller for request', key);
303 | controller.abort();
304 | this.abortControllers.delete(key);
305 | }
306 | }
307 |
308 | const abortController = new AbortController();
309 | this.abortControllers.set(requestId, abortController);
310 | abortController.signal.onabort = () => {
311 | // console.log('Controller for request', requestId, 'aborted and reset');
312 | this.abortControllers.delete(requestId);
313 | return resolve(null);
314 | };
315 |
316 | // Call 'preCompletion' middleware
317 | const lastMessage = this.messages[this.messages.length - 1];
318 | if (!lastMessage) {
319 | console.error('Cannot generate message without input');
320 | return resolve(null);
321 | }
322 |
323 | let middlewareResult: MagmaMessage;
324 | try {
325 | middlewareResult = await this.runMiddleware('preCompletion', lastMessage);
326 | } catch (error) {
327 | if (lastMessage.role === 'user') {
328 | this.messages.pop();
329 | }
330 |
331 | // console.log('Error in preCompletion middleware', error);
332 |
333 | return resolve(
334 | new MagmaAssistantMessage({
335 | role: 'assistant',
336 | content: error.message,
337 | })
338 | );
339 | }
340 |
341 | if (!middlewareResult) {
342 | throw new Error(
343 | `Catastrophic error: failed preCompletion middleware ${kMiddlewareMaxRetries} times`
344 | );
345 | }
346 |
347 | this.messages[this.messages.length - 1] = middlewareResult;
348 |
349 | const startingProviderConfig = this.providerConfig;
350 |
351 | if (config?.['provider']) {
352 | this.setProviderConfig(config);
353 | }
354 |
355 | const provider = Provider.factory(this.providerConfig.provider);
356 |
357 | // console.log('Messages before completion', JSON.stringify(this.messages, null, 2));
358 | const completionConfig: MagmaCompletionConfig = {
359 | providerConfig: this.providerConfig,
360 | messages: [
361 | ...this.getSystemPrompts().map((s) => new MagmaSystemMessage(s)),
362 | ...this.getMessages(this.messageContext),
363 | ],
364 | stream: this.stream,
365 | tools: this.tools.filter((t) => t.enabled(this)),
366 | };
367 |
368 | if (!this.abortControllers.has(requestId)) {
369 | // console.log('Controller for request', requestId, 'not found, returning null');
370 | return resolve(null);
371 | }
372 |
373 | const completion = await provider.makeCompletionRequest({
374 | config: completionConfig,
375 | onStreamChunk: this.onStreamChunk.bind(this),
376 | attempt: 0,
377 | signal: this.abortControllers.get(requestId)?.signal,
378 | agent: this,
379 | });
380 |
381 | if (completion === null) {
382 | // console.log('Completion returned null, returning null for request', requestId);
383 | return resolve(null);
384 | }
385 |
386 | this.setProviderConfig(startingProviderConfig);
387 |
388 | this.onUsageUpdate(completion.usage);
389 |
390 | let modifiedMessage: MagmaMessage;
391 | try {
392 | modifiedMessage = await this.runMiddleware('onCompletion', completion.message);
393 | this.messages.push(modifiedMessage);
394 | } catch (error) {
395 | if (this.messages.at(-1).role === 'assistant') {
396 | this.messages.pop();
397 | }
398 |
399 | this.addMessage({
400 | role: 'system',
401 | content: error.message,
402 | });
403 |
404 | // console.log('Error in onCompletion middleware, retrying', error);
405 |
406 | return resolve(await this.main(config, [...parentRequestIds, requestId]));
407 | }
408 |
409 | if (!modifiedMessage) {
410 | throw new Error(
411 | `Catastrophic error: failed onCompletion middleware ${kMiddlewareMaxRetries} times`
412 | );
413 | }
414 |
415 | const toolResults = await this.executeTools(completion.message);
416 |
417 | if (!this.abortControllers.has(requestId)) {
418 | // console.log('Controller for request', requestId, 'not found, returning null');
419 | return resolve(null);
420 | }
421 |
422 | if (toolResults.length > 0) {
423 | this.messages.push(
424 | new MagmaMessage({
425 | role: 'user',
426 | blocks: toolResults.map((t) => ({
427 | type: 'tool_result',
428 | tool_result: t,
429 | })),
430 | })
431 | );
432 |
433 | // console.log('Tool results found, retrying');
434 |
435 | // Trigger another completion because last message was a tool call
436 | return resolve(await this.main(config, [...parentRequestIds, requestId]));
437 | }
438 |
439 | try {
440 | modifiedMessage = await this.runMiddleware('onMainFinish', modifiedMessage);
441 | if (modifiedMessage) {
442 | this.messages[this.messages.length - 1] = modifiedMessage;
443 | }
444 | } catch (error) {
445 | if (this.messages.at(-1).role === 'assistant') {
446 | this.messages.pop();
447 | }
448 |
449 | this.addMessage({
450 | role: 'system',
451 | content: error.message,
452 | });
453 |
454 | // console.log('Error in onMainFinish middleware, retrying', error);
455 |
456 | return resolve(await this.main(config, [...parentRequestIds, requestId]));
457 | }
458 |
459 | if (!modifiedMessage) {
460 | throw new Error(
461 | `Catastrophic error: failed onMainFinish middleware ${kMiddlewareMaxRetries} times`
462 | );
463 | }
464 |
465 | try {
466 | modifiedMessage = await this.runMiddleware('postProcess', modifiedMessage);
467 | } catch (error) {
468 | if (this.messages.at(-1).role === 'assistant') {
469 | this.messages.pop();
470 | }
471 |
472 | this.addMessage({
473 | role: 'system',
474 | content: error.message,
475 | });
476 |
477 | // console.log('Error in postProcess middleware, retrying', error);
478 |
479 | return resolve(await this.main(config, [...parentRequestIds, requestId]));
480 | }
481 |
482 | // console.log('Main finished, returning message');
483 |
484 | return resolve(modifiedMessage as MagmaAssistantMessage);
485 | });
486 |
487 | return await mainPromise;
488 | } catch (error) {
489 | try {
490 | this.onError(error);
491 | } catch {
492 | throw error;
493 | }
494 | } finally {
495 | this.abortControllers.delete(requestId);
496 | }
497 | }
498 |
499 | /**
500 | * Set the provider configuration for the agent
501 | * @param providerConfig provider configuration
502 | */
503 | public setProviderConfig(providerConfig: MagmaProviderConfig): void {
504 | if (!providerConfig.client && !providerConfig.provider) {
505 | throw new Error('Provider client or provider must be defined');
506 | }
507 |
508 | // Set the client based on the provider if not provided
509 | if (!providerConfig.client) {
510 | switch (providerConfig.provider) {
511 | case 'openai':
512 | providerConfig.client ??= new OpenAI();
513 | break;
514 | case 'anthropic':
515 | providerConfig.client ??= new Anthropic();
516 | break;
517 | case 'groq':
518 | providerConfig.client ??= new Groq();
519 | break;
520 | case 'google':
521 | providerConfig.client ??= new GoogleGenerativeAI(process.env.GOOGLE_API_KEY);
522 | break;
523 | default:
524 | throw new Error('Invalid provider');
525 | }
526 | }
527 |
528 | this.providerConfig = providerConfig;
529 | }
530 |
531 | /**
532 | * Store a message in the agent context
533 | *
534 | * @param content content of the message to store
535 | * @param role message role (default: user)
536 | */
537 | public addMessage(message: MagmaMessageType): void {
538 | const newMessage = new MagmaMessage(message);
539 |
540 | // Validate images are base64 data, not URLs
541 | for (const image of newMessage.getImages()) {
542 | if (
543 | (this.providerConfig.provider === 'anthropic' ||
544 | this.providerConfig.provider === 'google') &&
545 | image.type === 'image/url'
546 | ) {
547 | throw new Error('Image URLs are not supported by Anthropic');
548 | }
549 | }
550 |
551 | this.messages.push(newMessage);
552 | }
553 |
554 | /**
555 | * Set the messages for the agent
556 | * @param messages messages to set
557 | */
558 | public setMessages(messages: MagmaMessage[]): void {
559 | this.messages = messages;
560 | }
561 |
562 | /**
563 | * Remove a message from the agent context
564 | * If no filter is provided, the last message is removed
565 | *
566 | * @param filter optional filter to remove a specific message
567 | */
568 | public removeMessage(filter?: (message: MagmaMessage) => boolean): void {
569 | if (filter) {
570 | this.messages = this.messages.filter((message) => !filter(message));
571 | } else {
572 | this.messages.pop();
573 | }
574 | }
575 |
576 | /**
577 | * Get the last N messages from the agent context
578 | * @param slice number of messages to return (default: 20)
579 | * @returns array of messages
580 | */
581 | public getMessages(slice: number = 20) {
582 | if (slice === -1) return this.messages;
583 |
584 | let messages = this.messages.slice(-slice);
585 | if (messages.length && messages.length > 0 && messages.at(0).getToolResults().length > 0) {
586 | messages = messages.slice(1);
587 | }
588 |
589 | return messages;
590 | }
591 |
    /**
     * Abort all in-flight requests for this agent.
     * Signals every outstanding AbortController and clears the registry,
     * so `processing` becomes false afterwards.
     * (Previous docstring claimed a boolean return; the method is void.)
     */
    public kill(): void {
        this.abortControllers.forEach((controller) => controller.abort());
        this.abortControllers.clear();
    }
600 |
    /**
     * Whether the agent currently has any in-flight requests
     * (i.e. at least one live AbortController is registered).
     */
    public get processing(): boolean {
        return this.abortControllers.size > 0;
    }
607 |
608 | public scheduleJobs({ verbose = false }: { verbose?: boolean } = {}): void {
609 | const jobs = this.jobs;
610 |
611 | for (const job of jobs) {
612 | if (verbose)
613 | this.log(`Job ${job.handler.name.split(' ').at(-1)} scheduled for ${job.schedule}`);
614 | this.scheduledJobs.push(
615 | cron.schedule(job.schedule, job.handler.bind(this), job.options)
616 | );
617 | }
618 | }
619 |
620 | public cancelJobs(): void {
621 | for (const scheduledJob of this.scheduledJobs) {
622 | scheduledJob.stop();
623 | }
624 |
625 | this.scheduledJobs = [];
626 | }
627 |
    /**
     * Execute every tool call carried by a message.
     *
     * Flow: runs 'preToolExecution' middleware over the message, executes each
     * surviving tool call (calls flagged with `.error` by middleware become
     * error results without running), then runs 'onToolExecution' middleware
     * over the assembled results.
     *
     * NOTE(review): the return type annotation reads bare `Promise` — the type
     * argument appears truncated in this file; judging by the body it resolves
     * to the message's tool results. Restore from version control.
     *
     * @param message message whose tool_call blocks should be executed
     * @param allowList tool names that may run even when currently disabled
     * @returns tool results after 'onToolExecution' middleware
     * @throws Error when critical middleware exhausts its retry budget
     */
    private async executeTools(
        message: MagmaMessage,
        allowList: string[] = []
    ): Promise {
        // run preToolExecution middleware
        let modifiedMessage = await this.runMiddleware('preToolExecution', message);

        // null signals a critical middleware gave up after max retries
        if (!modifiedMessage) {
            throw new Error(
                `Catastrophic error: failed preToolExecution middleware ${kMiddlewareMaxRetries} times`
            );
        }

        let toolResultBlocks: MagmaToolResultBlock[] = [];

        // execute the tool calls that didn't throw errors in preToolExecution middleware
        for (const toolCall of modifiedMessage.getToolCalls()) {
            let toolResult: MagmaToolResult;

            if (toolCall.error) {
                // Middleware already rejected this call; surface its error as the result.
                toolResult = {
                    id: toolCall.id,
                    fn_name: toolCall.fn_name,
                    result: toolCall.error,
                    error: true,
                    call: toolCall,
                };
            } else {
                try {
                    // Disabled tools are still callable when explicitly allow-listed.
                    const tool = this.tools
                        .filter((t) => t.enabled(this) || allowList.includes(t.name))
                        .find((t) => t.name === toolCall.fn_name);
                    if (!tool)
                        throw new Error(`No tool found to handle call for ${toolCall.fn_name}()`);

                    const result = await tool.target(toolCall, this);
                    if (!result) {
                        this.log(`Tool execution failed for ${toolCall.fn_name}()`);
                    }

                    toolResult = {
                        id: toolCall.id,
                        result: result ?? 'No result returned',
                        error: false,
                        fn_name: toolCall.fn_name,
                        call: toolCall,
                    };

                    // A successful execution resets the retry counter.
                    this.retryCount = 0;
                } catch (error) {
                    const errorMessage = `Tool Execution Failed for ${toolCall.fn_name}() - ${error.message ?? 'Unknown'}`;
                    this.log(errorMessage);

                    toolResult = {
                        id: toolCall.id,
                        result: errorMessage,
                        error: true,
                        fn_name: toolCall.fn_name,
                        call: toolCall,
                    };
                }
            }

            toolResultBlocks.push({
                type: 'tool_result',
                tool_result: toolResult,
            });
        }

        modifiedMessage = await this.runMiddleware(
            'onToolExecution',
            new MagmaMessage({
                role: 'assistant',
                blocks: toolResultBlocks,
            })
        );

        if (!modifiedMessage) {
            throw new Error(
                `Catastrophic error: failed onToolExecution middleware ${kMiddlewareMaxRetries} times`
            );
        }

        return modifiedMessage.getToolResults();
    }
719 |
    /**
     * Run every middleware registered for `trigger` over each block of `message`.
     *
     * Per block, the relevant payload (text / tool_call / tool_result) is fed
     * through each matching middleware in order; a middleware may replace the
     * payload by returning a value. Failures are counted per middleware (keyed
     * by a hash of its source); after kMiddlewareMaxRetries a critical
     * middleware aborts the run (returns null), a non-critical one is dropped.
     * Accumulated errors are attached to tool_call/tool_result payloads, or
     * re-thrown for text triggers.
     *
     * NOTE(review): the signature appears truncated in this file — `trigger: T`
     * has no `<T ...>` declaration and the return type reads bare `Promise`.
     * Restore the generic parameter list from version control.
     *
     * @param trigger middleware trigger to run (e.g. 'preCompletion')
     * @param message message whose blocks are passed through the middleware
     * @returns the (possibly modified) message, or null when a critical
     *          middleware has permanently failed
     * @throws Error when text-trigger middleware reported errors
     */
    private async runMiddleware(
        trigger: T,
        message: MagmaMessage
    ): Promise {
        // System messages are never passed through middleware.
        if (message.role === 'system') return message;

        // Determine whether there are relevant middleware actions to run
        const middleware = this.middleware.filter((f) => f.trigger === trigger);
        if (middleware.length === 0) return message;

        // Rebuilt block-by-block as middleware transforms each payload.
        const messageResult: MagmaMessage = new MagmaMessage({
            role: message.role,
            blocks: [],
        });

        const middlewareErrors: string[] = [];

        // Run the middleware for each block
        for (const item of message.blocks) {
            let middlewarePayload: MagmaMiddlewareParamType;

            // Select the payload for this block type; blocks irrelevant to the
            // trigger are copied through untouched via `continue`.
            switch (item.type) {
                case 'text':
                    // If the middleware is preCompletion and its an assistant message, we skip it
                    if (trigger === 'preCompletion' && message.role === 'assistant') {
                        messageResult.blocks.push(item);
                        continue;
                    }
                    // If the middleware is onCompletion and its a user message, we skip it
                    if (trigger === 'onCompletion' && message.role === 'user') {
                        messageResult.blocks.push(item);
                        continue;
                    }

                    // If the middleware is not preCompletion, onCompletion, onMainFinish, or postProcess we skip it
                    if (
                        trigger !== 'preCompletion' &&
                        trigger !== 'onCompletion' &&
                        trigger !== 'onMainFinish' &&
                        trigger !== 'postProcess'
                    ) {
                        messageResult.blocks.push(item);
                        continue;
                    }
                    middlewarePayload = item.text as MagmaMiddlewareParamType<
                        'preCompletion' | 'onCompletion' | 'onMainFinish' | 'postProcess'
                    >;
                    break;
                case 'tool_call':
                    // If the middleware is not preToolExecution, we skip it
                    if (trigger !== 'preToolExecution') {
                        messageResult.blocks.push(item);
                        continue;
                    }
                    middlewarePayload = {
                        id: item.tool_call.id,
                        fn_name: item.tool_call.fn_name,
                        fn_args: item.tool_call.fn_args,
                    } as MagmaMiddlewareParamType<'preToolExecution'>;
                    break;
                case 'tool_result':
                    // If the middleware is not onToolExecution, we skip it
                    if (trigger !== 'onToolExecution') {
                        messageResult.blocks.push(item);
                        continue;
                    }
                    middlewarePayload = {
                        id: item.tool_result.id,
                        fn_name: item.tool_result.fn_name,
                        result: item.tool_result.result,
                        error: item.tool_result.error,
                        call: item.tool_result.call,
                    } as MagmaMiddlewareParamType<'onToolExecution'>;
                    break;
                default:
                    messageResult.blocks.push(item);
                    continue;
            }

            // Feed the payload through each middleware in order.
            for (const mdlwr of middleware) {
                try {
                    // Run middleware target action on payload completion
                    const middlewareResult = (await mdlwr.action(
                        middlewarePayload,
                        this
                    )) as MagmaMiddlewareReturnType;
                    if (middlewareResult) {
                        middlewarePayload = middlewareResult;
                    }
                } catch (error) {
                    // Retry bookkeeping is keyed by a hash of the middleware source.
                    const mHash = hash(mdlwr.action.toString());
                    this.middlewareRetries[mHash] ??= 0;
                    this.middlewareRetries[mHash] += 1;

                    // Add the error to the middlewareErrors array
                    middlewareErrors.push(error.message);

                    if (this.middlewareRetries[mHash] >= kMiddlewareMaxRetries) {
                        this.log(
                            `${trigger} middleware failed to recover after ${kMiddlewareMaxRetries} attempts`
                        );

                        if (mdlwr.critical) {
                            // Critical middleware exhausted: abort the whole run.
                            return null;
                        } else {
                            // Non-critical: forget this failure and move on.
                            middlewareErrors.pop();
                            delete this.middlewareRetries[mHash];
                            continue;
                        }
                    }

                    this.log(
                        `'${trigger}' middleware threw an error - ${error.message ?? 'Unknown'}`
                    );
                }
            }

            // Re-wrap the (possibly replaced) payload as a block, attaching any
            // accumulated errors to tool payloads.
            switch (item.type) {
                case 'text':
                    messageResult.blocks.push({
                        type: 'text',
                        text: middlewarePayload as MagmaMiddlewareParamType<
                            'preCompletion' | 'onCompletion' | 'onMainFinish' | 'postProcess'
                        >,
                    });
                    break;
                case 'tool_call':
                    if (middlewareErrors.length > 0) {
                        (middlewarePayload as MagmaMiddlewareParamType<'preToolExecution'>).error =
                            middlewareErrors.join('\n');
                    }
                    messageResult.blocks.push({
                        type: 'tool_call',
                        tool_call:
                            middlewarePayload as MagmaMiddlewareParamType<'preToolExecution'>,
                    });
                    break;
                case 'tool_result':
                    if (middlewareErrors.length > 0) {
                        (middlewarePayload as MagmaMiddlewareParamType<'onToolExecution'>).result =
                            middlewareErrors.join('\n');
                        (middlewarePayload as MagmaMiddlewareParamType<'onToolExecution'>).error =
                            true;
                    }
                    messageResult.blocks.push({
                        type: 'tool_result',
                        tool_result:
                            middlewarePayload as MagmaMiddlewareParamType<'onToolExecution'>,
                    });
                    break;
            }
        }

        if (middlewareErrors.length === 0) {
            // Remove errors for middleware that was just run as everything was OK
            middleware.forEach(
                (mdlwr) => delete this.middlewareRetries[hash(mdlwr.action.toString())]
            );
        } else if (trigger !== 'preToolExecution' && trigger !== 'onToolExecution') {
            // Text-trigger errors propagate to the caller (main() retries).
            throw new Error(middlewareErrors.join('\n'));
        }

        return messageResult;
    }
884 |
885 | /* GETTERS */
886 |
887 | public get utilities(): MagmaUtilities[] {
888 | const loadedUtilities = this.getUtilities();
889 |
890 | return loadedUtilities;
891 | }
892 |
    /** Override to supply utility bundles (tools/middleware/hooks/jobs). Default: none. */
    public getUtilities(): MagmaUtilities[] {
        return [];
    }
896 |
    /** Override to supply additional tools beyond decorator-declared ones. Default: none. */
    public getTools(): MagmaTool[] {
        return [];
    }
900 |
    /** Override to supply additional middleware beyond decorator-declared ones. Default: none. */
    public getMiddleware(): MagmaMiddleware[] {
        return [];
    }
904 |
    /** Override to supply additional hooks beyond decorator-declared ones. Default: none. */
    public getHooks(): MagmaHook[] {
        return [];
    }
908 |
    /** Override to supply additional jobs beyond decorator-declared ones. Default: none. */
    public getJobs(): MagmaJob[] {
        return [];
    }
912 |
913 | public get tools(): MagmaTool[] {
914 | const agentTools = loadTools(this);
915 | const loadedTools = this.getTools();
916 | const utilityTools = this.utilities.flatMap((u) => u.tools.filter(Boolean));
917 | return agentTools.concat(loadedTools).concat(utilityTools);
918 | }
919 |
920 | public get middleware(): MagmaMiddleware[] {
921 | const agentMiddleware = loadMiddleware(this);
922 | const loadedMiddleware = this.getMiddleware();
923 | const utilityMiddleware = this.utilities.flatMap((u) => u.middleware.filter(Boolean));
924 | return agentMiddleware
925 | .concat(loadedMiddleware)
926 | .concat(utilityMiddleware)
927 | .sort((a, b) => (a.order ?? Number.MAX_VALUE) - (b.order ?? Number.MAX_VALUE));
928 | }
929 |
930 | public get hooks(): MagmaHook[] {
931 | const agentHooks = loadHooks(this);
932 | const loadedHooks = this.getHooks();
933 | const utilityHooks = this.utilities.flatMap((u) => u.hooks.filter(Boolean));
934 | return agentHooks.concat(loadedHooks).concat(utilityHooks);
935 | }
936 |
937 | public get jobs(): MagmaJob[] {
938 | const agentJobs = loadJobs(this);
939 | const loadedJobs = this.getJobs();
940 | const utilityJobs = this.utilities.flatMap((u) => u.jobs.filter(Boolean));
941 | return agentJobs.concat(loadedJobs).concat(utilityJobs);
942 | }
943 |
944 | /* EVENT HANDLERS */
945 |
    /** Override to supply system prompts prepended to the history on each completion. Default: none. */
    getSystemPrompts(): MagmaSystemMessageType[] {
        return [];
    }
949 |
    /**
     * Error handler invoked when a request fails; default rethrows.
     * Override to report/swallow instead.
     * NOTE(review): return annotation reads `Promise | void` — the Promise
     * type argument looks truncated in this file; confirm against VCS.
     */
    onError(error: Error): Promise | void {
        throw error;
    }
953 |
    /**
     * Called for each streaming chunk (a null chunk is passed by the
     * streaming caller — presumably end-of-stream; confirm). Default: no-op.
     */
    onStreamChunk(chunk: MagmaStreamChunk | null): Promise | void {
        chunk; // referenced only to silence unused-parameter lint; intentional no-op
        return;
    }
958 |
    /** Called after each completion with the provider-reported usage object. Default: no-op. */
    onUsageUpdate(usage: object): Promise | void {
        usage; // referenced only to silence unused-parameter lint; intentional no-op
        return;
    }
963 |
    /** Lifecycle hook for subclass cleanup. Default: no-op. */
    onCleanup(): Promise | void {
        return;
    }
967 | }
968 |
--------------------------------------------------------------------------------
/src/decorators.ts:
--------------------------------------------------------------------------------
1 | import { Request, Response } from 'express';
2 | import { MagmaAgent } from './agent';
3 | import {
4 | MagmaToolParam,
5 | MagmaMiddlewareTriggerType,
6 | MagmaMiddlewareTriggers,
7 | MagmaMiddlewareParamType,
8 | MagmaMiddlewareReturnType,
9 | MagmaToolCall,
10 | MagmaToolReturnType,
11 | MagmaHook,
12 | } from './types';
13 | import { validate } from 'node-cron';
14 |
/**
 * Decorator that marks a method as an agent tool.
 *
 * Attaches a `_toolInfo` record to the method; loadTools() later collects
 * any method carrying `_toolInfo`/`_parameterInfo` into a MagmaTool.
 *
 * NOTE(review): the generic parameter list after `function` appears truncated
 * in this file (`function >(`); the `<R ...>` declaration needs to be
 * restored from version control.
 *
 * @param args.name tool name exposed to the model (defaults to the method name)
 * @param args.description description of the tool for the model
 * @param args.cache whether the tool result may be cached
 * @param args.enabled predicate deciding if the tool is currently available
 */
export function tool(args: {
    name?: string;
    description?: string;
    cache?: boolean;
    enabled?: (agent: MagmaAgent) => boolean;
}) {
    return function >(
        target: object,
        propertyKey: string,
        descriptor: TypedPropertyDescriptor<
            ((call: MagmaToolCall, agent: MagmaAgent) => R) & {
                _toolInfo?: {
                    name?: string;
                    description?: string;
                    cache?: boolean;
                    enabled?: (agent: MagmaAgent) => boolean;
                };
            }
        >
    ) {
        // Stash the tool metadata on the method itself; the name falls back
        // to the decorated method's property key.
        descriptor.value._toolInfo = {
            name: args.name ?? propertyKey,
            description: args.description,
            cache: args.cache,
            enabled: args.enabled,
        };
    };
}
47 |
/**
 * Decorator declaring one parameter of a tool method.
 * Appends the parameter spec to the method's `_parameterInfo` list and
 * records the method name for loadTools().
 *
 * NOTE(review): the generic parameter list after `function` appears truncated
 * in this file (`function >(`); restore the `<R ...>` declaration from
 * version control.
 *
 * @param args.key name of the parameter
 * @param args.type type of the parameter (string, number, boolean, object, array)
 * @param args.description optional description of the parameter
 * @param args.required whether the parameter is required or not
 */
export function toolparam(args: MagmaToolParam & { key: string; required?: boolean }) {
    return function >(
        target: object,
        propertyKey: string,
        descriptor: TypedPropertyDescriptor<
            ((call: MagmaToolCall, agent: MagmaAgent) => R) & {
                _methodName?: string;
                _parameterInfo?: (MagmaToolParam & { key: string; required?: boolean })[];
            }
        >
    ) {
        // Ensure metadata exists on this method's prototype
        if (!descriptor.value._methodName) {
            descriptor.value._methodName = propertyKey;
        }

        // Each @toolparam application appends one parameter spec.
        descriptor.value._parameterInfo ??= [];
        descriptor.value._parameterInfo.push(args);
    };
}
75 |
/**
 * Decorator for middleware functions to run during completion chains.
 *
 * Validates the trigger at decoration time and stamps the method with
 * `_middlewareTrigger`, `_critical`, and `_order` so loadMiddleware() can
 * collect it.
 *
 * NOTE(review): the generics appear truncated in this file — `trigger: T`
 * has no `<T ...>` declaration and the constraint reads `Promise>` with a
 * missing type argument. Restore from version control.
 *
 * @param trigger which middleware event should trigger the decorated function
 * @param options.critical when true, repeated failures abort the run
 * @param options.order sort key controlling execution order (missing sorts last)
 */
export function middleware(
    trigger: T,
    options: { critical?: boolean; order?: number } = { critical: false }
) {
    return function <
        R extends MagmaMiddlewareReturnType | Promise>,
    >(
        target: object,
        propertyKey: string,
        descriptor: TypedPropertyDescriptor<
            ((content?: MagmaMiddlewareParamType, agent?: MagmaAgent) => R) & {
                _middlewareTrigger?: T;
                _critical?: boolean;
                _order?: number;
            }
        >
    ) {
        if (!trigger) {
            throw new Error('Middleware trigger is required');
        }

        if (!MagmaMiddlewareTriggers.includes(trigger)) {
            throw new Error(`Invalid middleware trigger - ${trigger}`);
        }

        // Stamp metadata onto the method for later collection.
        descriptor.value._middlewareTrigger = trigger;
        descriptor.value._critical = options.critical;
        descriptor.value._order = options.order;
        return descriptor;
    };
}
111 |
/**
 * Decorator for webhook handler methods.
 * Stamps `_hookName` and `_session` on the method so loadHooks() can expose
 * it (per the examples, as POST /hooks/<name>).
 *
 * NOTE(review): `function (` below seems to have lost its `<R>` type
 * parameter during extraction (R is referenced but never declared) — restore
 * from version control.
 *
 * Examples:
 * @hook('notification') -> POST /hooks/notification
 * @hook('notification', { session: 'default' })
 * @hook('notification', { session: (req) => req.body.userId })
 * @hook('notification', { session: fetchFromExternal(req) })
 *
 * @param hookName name of the hook
 * @param options.session session resolution strategy for the hook
 */
export function hook(hookName: string, options: { session?: MagmaHook['session'] } = {}) {
    return function (
        target: object,
        propertyKey: string,
        descriptor: TypedPropertyDescriptor<
            ((req: Request, res: Response, agent?: MagmaAgent) => R) & {
                _hookName?: string;
                _session?: MagmaHook['session'];
            }
        >
    ) {
        // Stamp metadata onto the method for later collection.
        descriptor.value._hookName = hookName;
        descriptor.value._session = options.session;
    };
}
138 |
/**
 * Decorator for scheduled jobs.
 * The cron expression is validated eagerly, so an invalid expression throws
 * at decoration (module load) time rather than when the job is scheduled.
 *
 * NOTE(review): `function (` below seems to have lost its `<R>` type
 * parameter during extraction (R is referenced but never declared) — restore
 * from version control.
 *
 * @param cron cron expression (https://www.npmjs.com/package/node-cron#cron-syntax)
 * @param options.timezone set the timezone for the job schedule
 */
export function job(cron: string, options: { timezone?: string } = {}) {
    // Validate cron expression
    if (!validate(cron)) {
        throw new Error(`Invalid cron expression - ${cron}`);
    }

    return function (
        target: object,
        propertyKey: string,
        descriptor: TypedPropertyDescriptor<
            ((agent?: MagmaAgent) => R) & { _schedule?: string; _options?: { timezone?: string } }
        >
    ) {
        // Stamp schedule metadata onto the method for loadJobs().
        descriptor.value._schedule = cron;
        descriptor.value._options = options;
    };
}
162 |
--------------------------------------------------------------------------------
/src/helpers.ts:
--------------------------------------------------------------------------------
1 | import { MagmaAgent } from './agent';
2 | import {
3 | MagmaHook,
4 | MagmaTool,
5 | MagmaToolParam,
6 | MagmaJob,
7 | MagmaMiddleware,
8 | MagmaUtilities,
9 | MagmaMessage,
10 | } from './types';
11 |
12 | /**
13 | * Helper function to recursively convert a MagmaToolParam to JSON object schema
14 | * @param param MagmaToolParam to convert to JSON object schema. Nested objects will be converted first
15 | * @param requiredList keys of required parameters
16 | * @returns { Record } JSON object schema
17 | */
18 | export const cleanParam = (
19 | param: MagmaToolParam & { key?: string; required?: boolean },
20 | requiredList?: string[]
21 | ): Record => {
22 | param.required && param.key && requiredList?.push(param.key);
23 |
24 | const objectRequiredParams = [];
25 |
26 | switch (param.type) {
27 | case 'array':
28 | if (!param.items)
29 | throw new Error(
30 | `Array parameters must have items defined - ${JSON.stringify(param)}`
31 | );
32 | return {
33 | type: 'array',
34 | description: param.description,
35 | items: cleanParam(param.items),
36 | };
37 | case 'object':
38 | if (!param.properties)
39 | throw new Error(
40 | `Object parameters must have properties defined - ${JSON.stringify(param)}`
41 | );
42 |
43 | return {
44 | type: 'object',
45 | description: param.description,
46 | properties: Object.fromEntries(
47 | param.properties.map((property) => {
48 | if (!property.key)
49 | throw new Error(
50 | `Object properties must have keys defined - ${JSON.stringify(property)}`
51 | );
52 |
53 | return [property.key, cleanParam(property, objectRequiredParams)];
54 | })
55 | ),
56 | required: objectRequiredParams,
57 | };
58 | case 'string':
59 | return {
60 | type: 'string',
61 | description: param.description,
62 | enum: param.enum,
63 | };
64 |
65 | case 'number':
66 | return {
67 | type: 'number',
68 | description: param.description,
69 | enum: param.enum,
70 | };
71 | case 'boolean':
72 | return {
73 | type: 'boolean',
74 | description: param.description,
75 | };
76 | }
77 | };
78 |
79 | /**
80 | * Helper function to load utilities from a class or instance of a class
81 | * If the target is a class, it will load the static utilities
82 | * If the target is an instance of a class, it will load the instance utilities (static Tools and Middleware are also loaded)
83 | * @param target class or instance of a class to load utilities from
84 | * @returns MagmaUtilities object
85 | */
86 | export function loadUtilities(target: any): MagmaUtilities {
87 | const tools = loadTools(target);
88 | const hooks = loadHooks(target);
89 | const jobs = loadJobs(target);
90 | const middleware = loadMiddleware(target);
91 |
92 | return { tools, hooks, jobs, middleware };
93 | }
94 |
95 | /**
96 | * Helper function to load tools from a class or instance of a class
97 | * If the target is an instance, it will load both the static and instance tools
98 | * @param target class or instance of a class to load tools from
99 | * @returns array of MagmaTool objects
100 | */
101 | export function loadTools(target: any): MagmaTool[] {
102 | const tools: MagmaTool[] = [];
103 | const { staticMethods, instanceMethods } = getMethodsFromClassOrInstance(target);
104 | const methods = [...staticMethods, ...instanceMethods];
105 |
106 | for (const method of methods) {
107 | if (typeof method === 'function' && ('_toolInfo' in method || '_parameterInfo' in method)) {
108 | const params: MagmaToolParam[] = method['_parameterInfo'] ?? [];
109 | tools.push({
110 | target: method.bind(target),
111 | name: (method['_toolInfo'] as any)?.name ?? method['_methodName'],
112 | description: (method['_toolInfo'] as any)?.description ?? undefined,
113 | params,
114 | enabled: (method['_toolInfo'] as any)?.enabled ?? (() => true),
115 | cache: (method['_toolInfo'] as any)?.cache ?? false,
116 | } as MagmaTool);
117 | }
118 | }
119 |
120 | return tools;
121 | }
122 |
123 | /**
124 | * Helper function to load hooks from a class or instance of a class
125 | * If the target is a class, it will load the static hooks
126 | * If the target is an instance of a class, it will load the instance hooks
127 | * @param target class or instance of a class to load hooks from
128 | * @returns array of MagmaHook objects
129 | */
130 | export function loadHooks(target: any): MagmaHook[] {
131 | const hooks: MagmaHook[] = [];
132 | const { staticMethods, instanceMethods, isInstance } = getMethodsFromClassOrInstance(target);
133 | const methods = isInstance ? instanceMethods : staticMethods;
134 |
135 | for (const method of methods) {
136 | if (typeof method === 'function' && '_hookName' in method) {
137 | hooks.push({
138 | name: method['_hookName'],
139 | handler: method.bind(target),
140 | session: method['_session'],
141 | } as MagmaHook);
142 | }
143 | }
144 |
145 | return hooks;
146 | }
147 |
148 | /**
149 | * Helper function to load jobs from a class or instance of a class
150 | * If the target is a class, it will load the static jobs
151 | * If the target is an instance of a class, it will load the instance jobs
152 | * @param target class or instance of a class to load jobs from
153 | * @returns array of MagmaJob objects
154 | */
155 | export function loadJobs(target: any): MagmaJob[] {
156 | const jobs: MagmaJob[] = [];
157 | const { staticMethods, instanceMethods, isInstance } = getMethodsFromClassOrInstance(target);
158 | const methods = isInstance ? instanceMethods : staticMethods;
159 |
160 | for (const method of methods) {
161 | if (typeof method === 'function' && '_schedule' in method) {
162 | jobs.push({
163 | handler: method.bind(target),
164 | schedule: method['_schedule'],
165 | options: method['_options'],
166 | name: method['_methodName'] || method['name'],
167 | } as MagmaJob);
168 | }
169 | }
170 |
171 | return jobs;
172 | }
173 |
174 | /**
175 | * Helper function to load middleware from a class or instance of a class
176 | * If the target is an instance, it will load both the static and instance middleware
177 | * @param target class or instance of a class to load middleware from
178 | * @returns array of MagmaMiddleware objects
179 | */
180 | export function loadMiddleware(target: any): MagmaMiddleware[] {
181 | const middleware: MagmaMiddleware[] = [];
182 |
183 | const { staticMethods, instanceMethods } = getMethodsFromClassOrInstance(target);
184 | const methods = [...staticMethods, ...instanceMethods];
185 |
186 | for (const method of methods) {
187 | if (typeof method === 'function' && '_middlewareTrigger' in method) {
188 | middleware.push({
189 | trigger: method['_middlewareTrigger'],
190 | action: method.bind(target),
191 | name: method['_methodName'] || method['name'],
192 | critical: method['_critical'] ?? false,
193 | order: method['_order'],
194 | } as MagmaMiddleware);
195 | }
196 | }
197 |
198 | return middleware;
199 | }
200 |
201 | export function mapNumberInRange(
202 | n: number,
203 | min: number,
204 | max: number,
205 | newMin: number,
206 | newMax: number
207 | ): number {
208 | return ((n - min) * (newMax - newMin)) / (max - min) + newMin;
209 | }
210 |
211 | function getMethodsFromClassOrInstance(target: any): {
212 | staticMethods: Function[];
213 | instanceMethods: Function[];
214 | isInstance: boolean;
215 | } {
216 | const isClass = /^\s*class\s+/.test(target.toString());
217 | const isInstance = typeof target === 'object' && !isClass ? true : false;
218 | const staticMethods: Function[] = [];
219 | const instanceMethods: Function[] = [];
220 |
221 | if (isInstance) {
222 | const prototype = Object.getPrototypeOf(target);
223 | const instancePropertyNames = Object.getOwnPropertyNames(prototype);
224 | const constructor = prototype.constructor;
225 | const staticPropertyNames = Object.getOwnPropertyNames(constructor);
226 | staticMethods.push(...staticPropertyNames.map((name) => constructor[name]));
227 | instanceMethods.push(...instancePropertyNames.map((name) => prototype[name]));
228 | } else {
229 | const staticPropertyNames = Object.getOwnPropertyNames(target);
230 | staticMethods.push(...staticPropertyNames.map((name) => target[name]));
231 | }
232 |
233 | return { staticMethods, instanceMethods, isInstance: !isClass };
234 | }
235 |
236 | export async function sleep(ms: number): Promise {
237 | await new Promise((resolve) => setTimeout(resolve, ms));
238 | }
239 |
240 | export const hash = (str: string) => {
241 | let hash = 0;
242 | if (str.length === 0) return hash;
243 | for (let i = 0; i < str.length; i++) {
244 | const chr = str.charCodeAt(i);
245 | hash = (hash << 5) - hash + chr;
246 | hash |= 0;
247 | }
248 | return hash;
249 | };
250 |
/**
 * Removes "orphaned" tool interactions from a conversation history, in place:
 * every assistant tool call must be immediately followed by a user message
 * carrying tool results, and every user tool result must be immediately
 * preceded by an assistant message carrying tool calls. Messages that violate
 * either pairing are deleted from the array.
 * @param messages MagmaMessage[] to sanitize (mutated in place)
 */
export function sanitizeMessages(messages: MagmaMessage[]): void {
    for (let i = 0; i < messages.length; i++) {
        // Case 1: assistant message containing one or more tool calls
        if (messages[i].role === 'assistant' && messages[i].getToolCalls().length > 0) {
            // A tool call at the very end of the history can never be answered — drop it
            if (i === messages.length - 1) {
                messages.pop();
            } else {
                // Otherwise the very next message must carry the tool results
                if (
                    messages[i + 1].role === 'user' &&
                    messages[i + 1].getToolResults().length > 0
                ) {
                    continue;
                } else {
                    // Unanswered tool call — remove it; i-- so the loop's i++
                    // re-examines the element that just shifted into this slot
                    messages.splice(i, 1);
                    i--;
                }
            }
            // Case 2: user message containing one or more tool results
        } else if (messages[i].role === 'user' && messages[i].getToolResults().length > 0) {
            // A tool result at the very start of the history answers nothing — drop it
            if (i === 0) {
                messages.shift();
                i--;
            } else {
                // Otherwise the previous message must carry the matching tool call
                if (
                    messages[i - 1].role === 'assistant' &&
                    messages[i - 1].getToolCalls().length > 0
                ) {
                    continue;
                } else {
                    // Tool result with no preceding tool call — remove and re-examine
                    messages.splice(i, 1);
                    i--;
                }
            }
        }
    }
}
315 |
--------------------------------------------------------------------------------
/src/index.ts:
--------------------------------------------------------------------------------
1 | export { loadUtilities, loadHooks, loadJobs, loadMiddleware, loadTools, sanitizeMessages } from './helpers';
2 | export { MagmaAgent } from './agent';
3 |
--------------------------------------------------------------------------------
/src/providers/anthropic.ts:
--------------------------------------------------------------------------------
1 | import Anthropic from '@anthropic-ai/sdk';
2 | import { MAX_RETRIES, Provider } from '.';
3 | import {
4 | AnthropicProviderConfig,
5 | MagmaAssistantMessage,
6 | MagmaCompletion,
7 | MagmaCompletionConfig,
8 | MagmaCompletionStopReason,
9 | MagmaContentBlock,
10 | MagmaMessage,
11 | MagmaReasoningBlock,
12 | MagmaStreamChunk,
13 | MagmaSystemMessage,
14 | MagmaTextBlock,
15 | MagmaTool,
16 | MagmaToolCall,
17 | MagmaToolCallBlock,
18 | MagmaToolParam,
19 | MagmaUsage,
20 | } from '../types';
21 |
22 | import {
23 | MessageCreateParamsBase as AnthropicConfig,
24 | MessageParam as AnthropicMessageParam,
25 | Tool as AnthropicTool,
26 | Message as AnthropicMessage,
27 | Message,
28 | TextBlockParam,
29 | } from '@anthropic-ai/sdk/resources/messages';
30 | import { cleanParam, sleep } from '../helpers';
31 | import { safeJSON } from '@anthropic-ai/sdk/core';
32 | import type { MagmaAgent } from '../agent';
33 |
/**
 * Provider adapter for the Anthropic Messages API (@anthropic-ai/sdk).
 * Translates Magma's provider-agnostic config/messages/tools into Anthropic's
 * request shapes and converts both streaming and non-streaming responses back
 * into MagmaCompletion objects.
 */
export class AnthropicProvider extends Provider {
    /**
     * Builds the Anthropic request config from a MagmaCompletionConfig.
     * System messages are hoisted out of the message list into the top-level
     * `system` field (with per-block ephemeral cache_control when requested),
     * and Magma's tool_choice is mapped to Anthropic's shapes
     * ('required' -> { type: 'any' }, a concrete name -> { type: 'tool', name }).
     * NOTE(review): `delete config.providerConfig` mutates the caller's config
     * object — confirm no caller reuses `config` after this conversion.
     */
    static override convertConfig(config: MagmaCompletionConfig): AnthropicConfig {
        let tool_choice = undefined;

        if (config.tool_choice === 'auto') tool_choice = { type: 'auto' };
        else if (config.tool_choice === 'required') tool_choice = { type: 'any' };
        else if (typeof config.tool_choice === 'string')
            tool_choice = { type: 'tool', name: config.tool_choice };

        const { model, settings } = config.providerConfig as AnthropicProviderConfig;

        delete config.providerConfig;

        const anthropicConfig: AnthropicConfig = {
            ...config,
            model,
            messages: this.convertMessages(config.messages),
            // claude-3-5 family gets a larger default output window
            max_tokens: settings?.max_tokens ?? (model.includes('claude-3-5') ? 8192 : 4096),
            tools: this.convertTools(config.tools),
            tool_choice,
            system: config.messages
                .filter((m) => m.role === 'system')
                .flatMap((m: MagmaSystemMessage) =>
                    m.blocks
                        .filter((b) => b.type === 'text')
                        .map((b) => {
                            const textBlock: TextBlockParam = {
                                type: 'text',
                                text: b.text,
                                cache_control: b.cache ? { type: 'ephemeral' } : undefined,
                            };

                            return textBlock;
                        })
                ),
            // settings spread last so explicit provider settings win over defaults above
            ...settings,
        };

        return anthropicConfig;
    }

    /**
     * Converts MagmaMessages to Anthropic MessageParams.
     * - system messages are skipped here (handled in convertConfig)
     * - assistant reasoning blocks map to thinking / redacted_thinking blocks,
     *   falling back to plain text when no signature is present (presumably
     *   because the API requires signed thinking blocks — TODO confirm)
     * - a synthetic 'Continue.' user turn is inserted between two consecutive
     *   assistant messages without tool calls
     * - a synthetic 'begin' user turn is prepended if the list doesn't start
     *   with a user message
     * NOTE(review): `delete message.id` strips ids off the caller's message
     * objects in place — verify this mutation is intended.
     */
    static override convertMessages(messages: MagmaMessage[]): AnthropicMessageParam[] {
        const anthropicMessages: AnthropicMessageParam[] = [];

        for (let i = 0; i < messages.length; i++) {
            const message = messages[i];
            if ('id' in message) delete message.id;

            switch (message.role) {
                case 'system':
                    continue;

                case 'assistant':
                    let assistantContent: AnthropicMessageParam['content'] = [];

                    for (const block of message.blocks) {
                        switch (block.type) {
                            case 'reasoning':
                                if (block.redacted) {
                                    assistantContent.push({
                                        type: 'redacted_thinking',
                                        data: block.reasoning,
                                    });
                                } else {
                                    // Unsigned reasoning is downgraded to a text block
                                    if (!block.signature) {
                                        assistantContent.push({
                                            type: 'text',
                                            text: `${block.reasoning}`,
                                        });
                                    } else {
                                        assistantContent.push({
                                            type: 'thinking',
                                            thinking: block.reasoning,
                                            signature: block.signature,
                                        });
                                    }
                                }
                                break;
                            case 'tool_call':
                                assistantContent.push({
                                    type: 'tool_use',
                                    id: block.tool_call.id,
                                    name: block.tool_call.fn_name,
                                    input: block.tool_call.fn_args,
                                });
                                break;
                            case 'text':
                                assistantContent.push({
                                    type: 'text',
                                    text: block.text,
                                });
                                break;
                            default:
                                throw new Error(
                                    `Unsupported block type for assistant messages: ${block.type}`
                                );
                        }
                    }

                    anthropicMessages.push({
                        role: 'assistant',
                        content: assistantContent,
                    });

                    // If the next message is also from the assistant (and this one
                    // issued no tool calls), insert a user turn to keep roles alternating
                    if (
                        i + 1 < messages.length &&
                        messages[i + 1].role === 'assistant' &&
                        messages[i].getToolCalls().length === 0
                    ) {
                        anthropicMessages.push({
                            role: 'user',
                            content: 'Continue.',
                        });
                    }
                    break;

                case 'user':
                    let userContent: AnthropicMessageParam['content'] = [];

                    for (const block of message.blocks) {
                        switch (block.type) {
                            case 'text':
                                userContent.push({
                                    type: 'text',
                                    text: block.text,
                                });
                                break;
                            case 'image':
                                // URL images pass through; anything else is assumed
                                // to be base64 data with a media type
                                if (block.image.type === 'image/url') {
                                    userContent.push({
                                        type: 'image',
                                        source: {
                                            type: 'url',
                                            url: block.image.data,
                                        },
                                    });
                                } else {
                                    userContent.push({
                                        type: 'image',
                                        source: {
                                            type: 'base64',
                                            data: block.image.data,
                                            media_type: block.image.type,
                                        },
                                    });
                                }
                                break;
                            case 'tool_result':
                                userContent.push({
                                    type: 'tool_result',
                                    tool_use_id: block.tool_result.id,
                                    // Non-string results are serialized to JSON
                                    content:
                                        typeof block.tool_result.result !== 'string'
                                            ? JSON.stringify(block.tool_result.result)
                                            : block.tool_result.result,
                                });
                                break;
                            default:
                                throw new Error(
                                    `Unsupported block type for user messages: ${block.type}`
                                );
                        }
                    }

                    anthropicMessages.push({
                        role: 'user',
                        content: userContent,
                    });
                    break;
            }
        }

        // The API expects the conversation to open with a user turn
        // NOTE(review): loose `!=` here — prefer `!==` for consistency
        if (anthropicMessages.length === 0 || anthropicMessages.at(0).role != 'user')
            anthropicMessages.unshift({
                role: 'user',
                content: 'begin',
            });

        return anthropicMessages;
    }

    /**
     * Executes a completion request against Anthropic, in streaming or
     * non-streaming mode, and normalizes the result to a MagmaCompletion.
     * Retries with exponential backoff (2^attempt seconds, capped at 60s) on
     * rate_limit_error up to MAX_RETRIES; returns null when `signal` aborts.
     * NOTE(review): onStreamChunk is async but its invocations are not
     * awaited, so chunk handlers may still be running after this returns —
     * confirm callers tolerate that.
     * NOTE(review): `attempt` is typed as required although it has a default;
     * `attempt?: number` would match actual usage.
     */
    static override async makeCompletionRequest({
        config,
        onStreamChunk,
        attempt = 0,
        signal,
        agent,
    }: {
        config: MagmaCompletionConfig;
        onStreamChunk?: (chunk: MagmaStreamChunk | null) => Promise;
        attempt: number;
        signal?: AbortSignal;
        agent: MagmaAgent;
    }): Promise {
        try {
            const anthropic = config.providerConfig.client as Anthropic;
            if (!anthropic) throw new Error('Anthropic instance not configured');

            const anthropicConfig = this.convertConfig(config);

            if (config.stream) {
                const stream = await anthropic.messages.create(
                    {
                        ...anthropicConfig,
                        stream: true,
                    },
                    { signal }
                );

                // Accumulates content blocks keyed by Anthropic's chunk.index so
                // that per-chunk deltas can be merged into the right block
                let blockBuffer: MagmaContentBlock[] = [];
                const usage: MagmaUsage = {
                    input_tokens: 0,
                    output_tokens: 0,
                    cache_write_tokens: 0,
                    cache_read_tokens: 0,
                };

                let id = stream._request_id;

                let stopReason: MagmaCompletionStopReason = null;

                for await (const chunk of stream) {
                    // `delta` carries only this chunk's new content; `buffer`
                    // carries everything accumulated so far
                    let magmaStreamChunk: MagmaStreamChunk = {
                        id,
                        provider: 'anthropic',
                        model: anthropicConfig.model,
                        delta: new MagmaAssistantMessage({ role: 'assistant', blocks: [] }),
                        buffer: new MagmaAssistantMessage({ role: 'assistant', blocks: [] }),
                        usage: {
                            input_tokens: null,
                            output_tokens: null,
                            cache_write_tokens: null,
                            cache_read_tokens: null,
                        },
                        stop_reason: null,
                    };

                    switch (chunk.type) {
                        case 'message_start':
                            id = chunk.message.id;
                            magmaStreamChunk.id = id;
                            usage.input_tokens += chunk.message.usage.input_tokens;
                            usage.output_tokens += chunk.message.usage.output_tokens;
                            usage.cache_write_tokens +=
                                chunk.message.usage.cache_creation_input_tokens;
                            usage.cache_read_tokens += chunk.message.usage.cache_read_input_tokens;
                            magmaStreamChunk.usage.input_tokens = chunk.message.usage.input_tokens;
                            magmaStreamChunk.usage.output_tokens =
                                chunk.message.usage.output_tokens;
                            magmaStreamChunk.usage.cache_write_tokens =
                                chunk.message.usage.cache_creation_input_tokens;
                            magmaStreamChunk.usage.cache_read_tokens =
                                chunk.message.usage.cache_read_input_tokens;
                            break;
                        case 'message_delta':
                            usage.output_tokens += chunk.usage.output_tokens;
                            magmaStreamChunk.usage.output_tokens = chunk.usage.output_tokens;
                            if (chunk.delta.stop_reason) {
                                stopReason = this.convertStopReason(chunk.delta.stop_reason);
                                magmaStreamChunk.stop_reason = stopReason;
                            }
                            break;
                        case 'content_block_start':
                            // A new content block opens at chunk.index
                            let blockStart: MagmaContentBlock;
                            switch (chunk.content_block.type) {
                                case 'text':
                                    blockStart = {
                                        type: 'text',
                                        text: chunk.content_block.text,
                                    };
                                    break;
                                case 'thinking':
                                    blockStart = {
                                        type: 'reasoning',
                                        reasoning: chunk.content_block.thinking,
                                        signature: chunk.content_block.signature,
                                    };
                                    break;
                                case 'redacted_thinking':
                                    blockStart = {
                                        type: 'reasoning',
                                        reasoning: chunk.content_block.data,
                                        redacted: true,
                                    };
                                    break;
                                case 'tool_use':
                                    // fn_args_buffer accumulates the partial JSON
                                    // argument string; parsed once complete
                                    blockStart = {
                                        type: 'tool_call',
                                        tool_call: {
                                            id: chunk.content_block.id,
                                            fn_name: chunk.content_block.name,
                                            fn_args: {},
                                            fn_args_buffer: '',
                                        },
                                    };
                                    break;
                            }
                            blockBuffer[chunk.index] = blockStart;
                            magmaStreamChunk.delta.blocks.push(blockStart as MagmaContentBlock);
                            break;
                        case 'content_block_delta':
                            // Merge this delta into the open block at chunk.index
                            let blockToChange: MagmaContentBlock = blockBuffer[chunk.index];
                            switch (chunk.delta.type) {
                                case 'text_delta':
                                    blockToChange = blockBuffer[chunk.index] as MagmaTextBlock;
                                    blockToChange.text += chunk.delta.text;
                                    magmaStreamChunk.delta.blocks.push({
                                        type: 'text',
                                        text: chunk.delta.text,
                                    });
                                    break;
                                case 'input_json_delta':
                                    blockToChange = blockBuffer[chunk.index] as MagmaToolCallBlock;
                                    blockToChange.tool_call.fn_args_buffer +=
                                        chunk.delta.partial_json;
                                    // safeJSON returns undefined on incomplete JSON,
                                    // hence the {} fallback
                                    magmaStreamChunk.delta.blocks.push({
                                        type: 'tool_call',
                                        tool_call: {
                                            id: blockToChange.tool_call.id,
                                            fn_name: blockToChange.tool_call.fn_name,
                                            fn_args: safeJSON(chunk.delta.partial_json) ?? {},
                                            fn_args_buffer: chunk.delta.partial_json,
                                        },
                                    });
                                    break;
                                case 'thinking_delta':
                                    blockToChange = blockBuffer[chunk.index] as MagmaReasoningBlock;
                                    blockToChange.reasoning += chunk.delta.thinking;
                                    magmaStreamChunk.delta.blocks.push({
                                        type: 'reasoning',
                                        reasoning: chunk.delta.thinking,
                                    });
                                    break;
                                case 'signature_delta':
                                    blockToChange = blockBuffer[chunk.index] as MagmaReasoningBlock;
                                    blockToChange.signature += chunk.delta.signature;
                                    magmaStreamChunk.delta.blocks.push({
                                        type: 'reasoning',
                                        reasoning: '',
                                        signature: chunk.delta.signature,
                                    });
                                    break;
                                default:
                                    throw new Error(`Unsupported delta type: ${chunk.delta.type}`);
                            }
                            break;
                        case 'message_stop': {
                            // Stream complete: parse buffered tool-call argument
                            // JSON and assemble the final completion
                            let magmaMessage: MagmaMessage = new MagmaMessage({
                                role: 'assistant',
                                blocks: blockBuffer.map((b) =>
                                    b.type === 'tool_call'
                                        ? {
                                              type: 'tool_call',
                                              tool_call: {
                                                  ...b.tool_call,
                                                  fn_args:
                                                      safeJSON(b.tool_call.fn_args_buffer) ?? {},
                                                  fn_args_buffer: b.tool_call.fn_args_buffer,
                                              },
                                          }
                                        : b
                                ),
                            });

                            const magmaCompletion: MagmaCompletion = {
                                provider: 'anthropic',
                                model: anthropicConfig.model,
                                message: magmaMessage,
                                usage: usage,
                                stop_reason: stopReason,
                            };

                            // null chunk signals end-of-stream to the handler
                            onStreamChunk?.(null);

                            return magmaCompletion;
                        }
                    }

                    // Snapshot the accumulated state into this chunk's buffer
                    magmaStreamChunk.buffer.blocks = blockBuffer.map((b) =>
                        b.type === 'tool_call'
                            ? {
                                  type: 'tool_call',
                                  tool_call: {
                                      ...b.tool_call,
                                      fn_args: safeJSON(b.tool_call.fn_args_buffer) ?? {},
                                      fn_args_buffer: b.tool_call.fn_args_buffer,
                                  },
                              }
                            : b
                    );

                    onStreamChunk?.(magmaStreamChunk);
                }
            } else {
                const anthropicCompletion = (await anthropic.messages.create(anthropicConfig, {
                    signal,
                })) as AnthropicMessage;

                const blocks = anthropicCompletion.content;

                let magmaMessage: MagmaMessage = new MagmaMessage({
                    role: 'assistant',
                    blocks: [],
                });

                for (const block of blocks) {
                    switch (block.type) {
                        case 'text':
                            magmaMessage.blocks.push({
                                type: 'text',
                                text: block.text,
                            });
                            break;
                        case 'tool_use':
                            magmaMessage.blocks.push({
                                type: 'tool_call',
                                tool_call: {
                                    id: block.id,
                                    fn_name: block.name,
                                    fn_args: block.input,
                                },
                            });
                            break;
                        case 'thinking':
                            magmaMessage.blocks.push({
                                type: 'reasoning',
                                reasoning: block.thinking,
                                signature: block.signature,
                            });
                            break;
                        case 'redacted_thinking':
                            magmaMessage.blocks.push({
                                type: 'reasoning',
                                reasoning: block.data,
                                redacted: true,
                            });
                            break;
                        default:
                            throw new Error(
                                `Unsupported block type for assistant messages: ${block}`
                            );
                    }
                }

                if (magmaMessage.blocks.length === 0) {
                    console.log(JSON.stringify(anthropicCompletion, null, 2));
                    throw new Error('Anthropic completion was null');
                }

                const magmaCompletion: MagmaCompletion = {
                    provider: 'anthropic',
                    model: anthropicCompletion.model,
                    message: magmaMessage,
                    usage: {
                        input_tokens: anthropicCompletion.usage.input_tokens,
                        output_tokens: anthropicCompletion.usage.output_tokens,
                        cache_write_tokens: anthropicCompletion.usage.cache_creation_input_tokens,
                        cache_read_tokens: anthropicCompletion.usage.cache_read_input_tokens,
                    },
                    stop_reason: this.convertStopReason(anthropicCompletion.stop_reason),
                };

                return magmaCompletion;
            }
        } catch (error) {
            // An abort is an expected cancellation, not a failure
            if (signal?.aborted) {
                return null;
            }
            if (error.error?.type === 'rate_limit_error') {
                if (attempt >= MAX_RETRIES) {
                    throw new Error(`Rate limited after ${MAX_RETRIES} attempts`);
                }
                // Exponential backoff: 1s, 2s, 4s, ... capped at 60s
                const delay = Math.min(Math.pow(2, attempt) * 1000, 60000);
                agent.log(`Rate limited. Retrying after ${delay}ms.`);

                await sleep(delay);
                return this.makeCompletionRequest({
                    config,
                    onStreamChunk,
                    attempt: attempt + 1,
                    signal,
                    agent,
                });
            } else {
                throw error;
            }
        }
    }

    /**
     * Converts Magma tool schemas into Anthropic tool definitions.
     * Tools with no params get a bare `{ type: 'object' }` input schema;
     * `cache` marks the tool definition for ephemeral prompt caching.
     * Returns undefined when there are no tools.
     */
    static override convertTools(tools: MagmaTool[]): AnthropicTool[] | undefined {
        if (tools.length === 0) return undefined;

        const anthropicTools: AnthropicTool[] = [];
        for (const tool of tools) {
            const baseObject: MagmaToolParam = {
                type: 'object',
                properties: tool.params,
            };

            anthropicTools.push({
                name: tool.name,
                description: tool.description,
                input_schema: (tool.params.length === 0
                    ? { type: 'object' }
                    : cleanParam(baseObject, [])) as AnthropicTool.InputSchema,
                cache_control: tool.cache ? { type: 'ephemeral' } : undefined,
            });
        }

        return anthropicTools;
    }

    /**
     * Maps Anthropic stop reasons onto Magma's normalized stop reasons.
     */
    static override convertStopReason(
        stop_reason: Message['stop_reason']
    ): MagmaCompletionStopReason {
        switch (stop_reason) {
            case 'end_turn':
                return 'natural';
            case 'max_tokens':
                return 'max_tokens';
            case 'tool_use':
                return 'tool_call';
            default:
                return 'unknown';
        }
    }
}
563 |
--------------------------------------------------------------------------------
/src/providers/google.ts:
--------------------------------------------------------------------------------
1 | import {
2 | Content,
3 | FinishReason,
4 | FunctionCallingMode,
5 | FunctionDeclaration,
6 | FunctionDeclarationSchema,
7 | FunctionResponsePart,
8 | GoogleGenerativeAI,
9 | ModelParams,
10 | Part,
11 | Tool,
12 | ToolConfig,
13 | } from '@google/generative-ai';
14 | import { MAX_RETRIES, Provider } from '.';
15 | import {
16 | GoogleProviderConfig,
17 | MagmaAssistantMessage,
18 | MagmaCompletion,
19 | MagmaCompletionConfig,
20 | MagmaCompletionStopReason,
21 | MagmaMessage,
22 | MagmaStreamChunk,
23 | MagmaTool,
24 | MagmaToolParam,
25 | MagmaUsage,
26 | } from '../types';
27 | import { cleanParam, sleep } from '../helpers';
28 | import { type MagmaAgent } from '../agent';
29 |
/**
 * Provider adapter for Google's Generative AI SDK (@google/generative-ai).
 * Translates Magma's provider-agnostic config/messages/tools into Google's
 * request shapes and converts streaming and non-streaming responses back into
 * MagmaCompletion objects. Google does not assign tool-call ids, so this
 * adapter synthesizes them with crypto.randomUUID().
 */
export class GoogleProvider extends Provider {
    /**
     * Executes a completion request against Google, in streaming or
     * non-streaming mode, and normalizes the result to a MagmaCompletion.
     * Retries with exponential backoff (capped at 60s) on HTTP 429 up to
     * MAX_RETRIES; returns null when `signal` aborts.
     * NOTE(review): in the streaming branch the local `usage` accumulator is
     * initialized to zeros but never updated from chunk.usageMetadata (only
     * the per-chunk magmaStreamChunk.usage is), so the final completion
     * reports zero token usage — looks like a bug, confirm.
     * NOTE(review): onStreamChunk is async but not awaited here.
     */
    static override async makeCompletionRequest({
        config,
        onStreamChunk,
        attempt = 0,
        signal,
        agent,
    }: {
        config: MagmaCompletionConfig;
        onStreamChunk?: (chunk: MagmaStreamChunk | null) => Promise;
        attempt: number;
        signal?: AbortSignal;
        agent: MagmaAgent;
    }): Promise {
        try {
            const google = config.providerConfig.client as GoogleGenerativeAI;
            if (!google) throw new Error('Google instance not configured');

            const googleConfig = this.convertConfig(config);

            const model = google.getGenerativeModel(googleConfig);

            if (config.stream) {
                const { stream } = await model.generateContentStream(
                    { contents: this.convertMessages(config.messages) },
                    { signal }
                );
                let contentBuffer = '';
                const usage: MagmaUsage = {
                    input_tokens: 0,
                    output_tokens: 0,
                    cache_write_tokens: 0,
                    cache_read_tokens: 0,
                };

                // Tool calls arrive whole (not as JSON deltas), accumulated here
                const streamedToolCalls: {
                    id: string;
                    fn_name: string;
                    fn_args: Record;
                    fn_args_buffer: string;
                }[] = [];

                // Google streams carry no message id; synthesize one
                let id = crypto.randomUUID();

                let stopReason: MagmaCompletionStopReason = null;

                for await (const chunk of stream) {
                    // `delta` carries only this chunk's new text; `buffer`
                    // carries everything accumulated so far
                    let magmaStreamChunk: MagmaStreamChunk = {
                        id,
                        provider: 'google',
                        model: googleConfig.model,
                        delta: new MagmaAssistantMessage({ role: 'assistant', blocks: [] }),
                        buffer: new MagmaAssistantMessage({ role: 'assistant', blocks: [] }),
                        usage: {
                            input_tokens: null,
                            output_tokens: null,
                            cache_write_tokens: null,
                            cache_read_tokens: null,
                        },
                        stop_reason: null,
                    };

                    if (chunk.usageMetadata) {
                        magmaStreamChunk.usage.input_tokens = chunk.usageMetadata.promptTokenCount;
                        magmaStreamChunk.usage.output_tokens =
                            chunk.usageMetadata.candidatesTokenCount;
                    }

                    if (chunk.text().length > 0) {
                        magmaStreamChunk.delta.blocks.push({
                            type: 'text',
                            text: chunk.text(),
                        });
                        contentBuffer += chunk.text();
                    }

                    if (chunk.functionCalls()?.length > 0) {
                        for (const toolCall of chunk.functionCalls()) {
                            streamedToolCalls.push({
                                id: crypto.randomUUID(),
                                fn_name: toolCall.name,
                                fn_args: toolCall.args,
                                fn_args_buffer: JSON.stringify(toolCall.args),
                            });
                        }
                    }

                    if (chunk.candidates[0]?.finishReason) {
                        // Any pending tool call overrides the reported finish reason
                        if (streamedToolCalls.length > 0) {
                            stopReason = 'tool_call';
                        } else {
                            stopReason = this.convertStopReason(chunk.candidates[0].finishReason);
                        }
                        magmaStreamChunk.stop_reason = stopReason;
                    }

                    if (contentBuffer.length > 0) {
                        magmaStreamChunk.buffer.blocks.push({
                            type: 'text',
                            text: contentBuffer,
                        });
                    }

                    for (const toolCall of streamedToolCalls) {
                        magmaStreamChunk.buffer.blocks.push({
                            type: 'tool_call',
                            tool_call: {
                                id: toolCall.id,
                                fn_name: toolCall.fn_name,
                                fn_args: toolCall.fn_args,
                                fn_args_buffer: toolCall.fn_args_buffer,
                            },
                        });
                    }

                    onStreamChunk?.(magmaStreamChunk);
                }

                // Stream exhausted: assemble the final message (tool calls first,
                // then the concatenated text, matching the buffer layout above)
                let magmaMessage: MagmaMessage = new MagmaMessage({
                    role: 'assistant',
                    blocks: [],
                });

                for (const toolCall of streamedToolCalls) {
                    magmaMessage.blocks.push({
                        type: 'tool_call',
                        tool_call: {
                            id: toolCall.id,
                            fn_name: toolCall.fn_name,
                            fn_args: toolCall.fn_args,
                            fn_args_buffer: toolCall.fn_args_buffer,
                        },
                    });
                }

                if (contentBuffer.length > 0) {
                    magmaMessage.blocks.push({
                        type: 'text',
                        text: contentBuffer,
                    });
                }

                const magmaCompletion: MagmaCompletion = {
                    provider: 'google',
                    model: googleConfig.model,
                    message: magmaMessage,
                    usage,
                    stop_reason: stopReason,
                };

                // null chunk signals end-of-stream to the handler
                onStreamChunk?.(null);

                return magmaCompletion;
            } else {
                const googleCompletion = await model.generateContent(
                    { contents: this.convertMessages(config.messages) },
                    {
                        signal,
                    }
                );

                let magmaMessage: MagmaMessage = new MagmaMessage({
                    role: 'assistant',
                    blocks: [],
                });

                const functionCalls = googleCompletion.response.functionCalls() ?? [];
                const text = googleCompletion.response.text();

                for (const toolCall of functionCalls) {
                    magmaMessage.blocks.push({
                        type: 'tool_call',
                        tool_call: {
                            id: crypto.randomUUID(),
                            fn_name: toolCall.name,
                            fn_args: toolCall.args,
                        },
                    });
                }

                if (text?.length > 0) {
                    magmaMessage.blocks.push({
                        type: 'text',
                        text,
                    });
                }

                if (magmaMessage.blocks.length === 0) {
                    console.log(JSON.stringify(googleCompletion.response, null, 2));
                    throw new Error('Google completion was null');
                }

                const magmaCompletion: MagmaCompletion = {
                    provider: 'google',
                    model: googleConfig.model,
                    message: magmaMessage,
                    usage: {
                        input_tokens: googleCompletion.response.usageMetadata.promptTokenCount,
                        output_tokens: googleCompletion.response.usageMetadata.candidatesTokenCount,
                        cache_write_tokens: 0,
                        cache_read_tokens: 0,
                    },
                    stop_reason:
                        magmaMessage.getToolCalls().length > 0
                            ? 'tool_call'
                            : this.convertStopReason(
                                  googleCompletion.response.candidates[0]?.finishReason
                              ),
                };

                return magmaCompletion;
            }
        } catch (error) {
            // An abort is an expected cancellation, not a failure
            if (signal?.aborted) {
                return null;
            }
            if (error.response && error.response.status === 429) {
                if (attempt >= MAX_RETRIES) {
                    throw new Error(`Rate limited after ${MAX_RETRIES} attempts`);
                }
                // Exponential backoff: 1s, 2s, 4s, ... capped at 60s
                const delay = Math.min(Math.pow(2, attempt) * 1000, 60000);
                agent.log(`Rate limited. Retrying after ${delay}ms.`);

                await sleep(delay);
                return this.makeCompletionRequest({
                    config,
                    onStreamChunk,
                    attempt: attempt + 1,
                    signal,
                    agent,
                });
            } else {
                throw error;
            }
        }
    }

    /**
     * Converts Magma tool schemas into Google FunctionDeclarations.
     * Parameter-less tools omit `parameters` entirely (Google rejects empty
     * property maps — TODO confirm that is why). Returns undefined when there
     * are no tools.
     */
    static override convertTools(tools: MagmaTool[]): FunctionDeclaration[] | undefined {
        if (tools.length === 0) return undefined;

        const googleTools: FunctionDeclaration[] = [];

        for (const tool of tools) {
            const baseObject: MagmaToolParam = {
                type: 'object',
                properties: tool.params,
            };

            const parameters = cleanParam(baseObject, []) as FunctionDeclarationSchema;

            googleTools.push({
                name: tool.name,
                description: tool.description,
                parameters: Object.keys(parameters.properties).length > 0 ? parameters : undefined,
            });
        }

        return googleTools;
    }

    /**
     * Builds Google ModelParams from a MagmaCompletionConfig.
     * tool_choice maps onto FunctionCallingMode (AUTO / ANY, with
     * allowedFunctionNames to force a specific tool); system messages are
     * joined into a single systemInstruction string.
     */
    static override convertConfig(config: MagmaCompletionConfig): ModelParams {
        const functionDeclarations: FunctionDeclaration[] = this.convertTools(config.tools);

        let toolConfig: ToolConfig = {
            functionCallingConfig: {
                mode: FunctionCallingMode.MODE_UNSPECIFIED,
            },
        };

        if (config.tool_choice === 'auto')
            toolConfig.functionCallingConfig.mode = FunctionCallingMode.AUTO;
        else if (config.tool_choice === 'required')
            toolConfig.functionCallingConfig.mode = FunctionCallingMode.ANY;
        else if (typeof config.tool_choice === 'string') {
            toolConfig.functionCallingConfig.mode = FunctionCallingMode.ANY;
            toolConfig.functionCallingConfig.allowedFunctionNames = [config.tool_choice];
        }

        const tools: Tool[] = [];

        // Only attach a tools entry when there are declarations to send
        functionDeclarations &&
            tools.push({
                functionDeclarations,
            });

        const { model, settings } = config.providerConfig as GoogleProviderConfig;

        const googleConfig: ModelParams = {
            model,
            tools,
            toolConfig,
            systemInstruction: config.messages
                .filter((m) => m.role === 'system')
                .map((m) => m.getText())
                .join('\n'),
            generationConfig: settings,
        };

        return googleConfig;
    }

    /**
     * Converts MagmaMessages into Google Content entries.
     * System messages are skipped (handled via systemInstruction); assistant
     * reasoning blocks are flattened to text. Tool results are wrapped in
     * { error } or { result } response objects. Prepends a synthetic 'begin'
     * user turn when nothing else converts.
     * NOTE(review): `delete message.id` mutates the caller's message objects.
     */
    static override convertMessages(messages: MagmaMessage[]): Content[] {
        const googleMessages: Content[] = [];

        for (const message of messages) {
            if ('id' in message) delete message.id;

            switch (message.role) {
                case 'system':
                    continue;
                case 'assistant':
                    let assistantParts: Part[] = [];
                    for (const block of message.blocks) {
                        switch (block.type) {
                            case 'text':
                                assistantParts.push({ text: block.text });
                                break;
                            case 'tool_call':
                                assistantParts.push({
                                    functionCall: {
                                        name: block.tool_call.fn_name,
                                        args: block.tool_call.fn_args,
                                    },
                                });
                                break;
                            case 'reasoning':
                                // Google has no reasoning block type; send as text
                                assistantParts.push({
                                    text: `${block.reasoning}`,
                                });
                                break;
                            default:
                                throw new Error(`Unsupported block type: ${block.type}`);
                        }
                    }
                    if (assistantParts.length > 0) {
                        googleMessages.push({
                            role: 'model',
                            parts: assistantParts,
                        });
                    }
                    break;
                case 'user':
                    let userParts: Part[] = [];
                    for (const block of message.blocks) {
                        switch (block.type) {
                            case 'text':
                                userParts.push({ text: block.text });
                                break;
                            case 'image':
                                // NOTE(review): assumes block.image.data is base64
                                // inline data; URL-type images are not special-cased
                                // here (unlike the Anthropic adapter) — confirm
                                userParts.push({
                                    inlineData: {
                                        data: block.image.data,
                                        mimeType: block.image.type,
                                    },
                                });
                                break;
                            case 'tool_result':
                                const resultPart: FunctionResponsePart = {
                                    functionResponse: {
                                        name: block.tool_result.fn_name,
                                        response: block.tool_result.error
                                            ? {
                                                  error: `Something went wrong calling your last tool - \n ${typeof block.tool_result.result !== 'string' ? JSON.stringify(block.tool_result.result) : block.tool_result.result}`,
                                              }
                                            : {
                                                  result:
                                                      typeof block.tool_result.result !== 'string'
                                                          ? JSON.stringify(block.tool_result.result)
                                                          : block.tool_result.result,
                                              },
                                    },
                                };
                                userParts.push(resultPart);
                                break;
                            default:
                                throw new Error(`Unsupported block type: ${block.type}`);
                        }
                    }

                    if (userParts.length > 0) {
                        googleMessages.push({
                            role: 'user',
                            parts: userParts,
                        });
                    }
                    break;
            }
        }

        if (googleMessages.length === 0) {
            googleMessages.unshift({
                role: 'user',
                parts: [{ text: 'begin' }],
            });
        }

        return googleMessages;
    }

    /**
     * Maps Google finish reasons onto Magma's normalized stop reasons.
     * NOTE(review): RECITATION is treated the same as STOP ('natural') —
     * confirm that lumping recitation blocks in with natural stops is intended.
     */
    static override convertStopReason(stop_reason: FinishReason): MagmaCompletionStopReason {
        switch (stop_reason) {
            case FinishReason.RECITATION:
            case FinishReason.STOP:
                return 'natural';
            case FinishReason.MAX_TOKENS:
                return 'max_tokens';
            case FinishReason.SAFETY:
                return 'content_filter';
            case FinishReason.LANGUAGE:
                return 'unsupported';
            default:
                return 'unknown';
        }
    }
}
448 |
--------------------------------------------------------------------------------
/src/providers/groq.ts:
--------------------------------------------------------------------------------
1 | import { MAX_RETRIES, Provider } from '.';
2 | import {
3 | GroqProviderConfig,
4 | MagmaAssistantMessage,
5 | MagmaCompletion,
6 | MagmaCompletionConfig,
7 | MagmaCompletionStopReason,
8 | MagmaMessage,
9 | MagmaStreamChunk,
10 | MagmaTextBlock,
11 | MagmaTool,
12 | MagmaToolCallBlock,
13 | MagmaToolParam,
14 | MagmaUsage,
15 | } from '../types';
16 | import {
17 | ChatCompletionTool as GroqTool,
18 | ChatCompletionMessageParam as GroqMessageParam,
19 | ChatCompletionCreateParamsBase as GroqConfig,
20 | ChatCompletionChunk,
21 | ChatCompletion,
22 | ChatCompletionUserMessageParam,
23 | } from 'groq-sdk/resources/chat/completions';
24 | import Groq from 'groq-sdk';
25 | import { cleanParam, sleep } from '../helpers';
26 | import { safeJSON } from 'groq-sdk/core';
27 | import type { MagmaAgent } from '../agent';
28 |
/**
 * Provider implementation for the Groq chat-completions API.
 *
 * Translates Magma's provider-agnostic config/messages/tools into Groq SDK
 * shapes, issues (optionally streaming) completion requests with 429 retry
 * backoff, and converts responses back into Magma types.
 *
 * NOTE(review): several generic type arguments in this copy appear to have
 * been stripped by extraction (e.g. bare `Promise` below was presumably
 * `Promise<MagmaCompletion | null>`) — confirm against upstream.
 */
export class GroqProvider extends Provider {
    /**
     * Convert a MagmaCompletionConfig into the request object expected by
     * `groq.chat.completions.create`.
     *
     * NOTE(review): mutates its argument (`delete config.providerConfig`)
     * before spreading it — callers must not reuse `config` afterwards.
     */
    static override convertConfig(config: MagmaCompletionConfig): GroqConfig {
        let tool_choice = undefined;

        // 'auto' / 'required' pass through unchanged; any other string is
        // treated as the name of a specific function the model must call.
        if (config.tool_choice === 'auto') tool_choice = 'auto';
        else if (config.tool_choice === 'required') tool_choice = 'required';
        else if (typeof config.tool_choice === 'string')
            tool_choice = { type: 'function', function: { name: config.tool_choice } };

        const { model, settings } = config.providerConfig as GroqProviderConfig;

        // Strip the Magma-only field so it does not leak into the Groq
        // payload via the spread below.
        delete config.providerConfig;

        const groqConfig: GroqConfig = {
            ...config,
            model,
            messages: this.convertMessages(config.messages),
            tools: this.convertTools(config.tools),
            tool_choice,
            // Provider-specific settings take precedence over the generic
            // fields spread above.
            ...settings,
        };

        return groqConfig;
    }

    /**
     * Execute a completion request against Groq.
     *
     * In streaming mode, accumulates text and tool-call deltas chunk by chunk,
     * emitting a MagmaStreamChunk per SDK chunk (delta = this chunk only,
     * buffer = everything so far), then returns the fully assembled message.
     * In non-streaming mode, converts the single response directly.
     *
     * Retries with exponential backoff (capped at 60s) on HTTP 429, up to
     * MAX_RETRIES attempts. Returns null if the request was aborted via
     * `signal`.
     *
     * NOTE(review): the `onStreamChunk` callback returns a Promise but is
     * never awaited — chunk handlers run fire-and-forget; confirm intended.
     */
    static override async makeCompletionRequest({
        config,
        onStreamChunk,
        attempt = 0,
        signal,
        agent,
    }: {
        config: MagmaCompletionConfig;
        onStreamChunk?: (chunk: MagmaStreamChunk | null) => Promise;
        attempt: number;
        signal?: AbortSignal;
        agent: MagmaAgent;
    }): Promise {
        try {
            const groq = config.providerConfig.client as Groq;
            if (!groq) throw new Error('Groq instance not configured');

            const groqConfig = this.convertConfig(config);

            if (config.stream) {
                const stream = await groq.chat.completions.create(
                    {
                        ...groqConfig,
                        stream: true,
                    },
                    { signal }
                );

                // Running concatenation of all text deltas seen so far.
                let contentBuffer = '';
                const usage: MagmaUsage = {
                    input_tokens: 0,
                    output_tokens: 0,
                    cache_write_tokens: 0,
                    cache_read_tokens: 0,
                };

                // Tool calls arrive as partial deltas keyed by index; the
                // first delta carries id/name, later ones append argument
                // fragments.
                let streamedToolCalls: {
                    [index: number]: ChatCompletionChunk.Choice.Delta.ToolCall;
                } = {};

                let stopReason: MagmaCompletionStopReason = null;

                for await (const chunk of stream) {
                    let magmaStreamChunk: MagmaStreamChunk = {
                        id: chunk.id,
                        provider: 'groq',
                        model: chunk.model,
                        delta: new MagmaAssistantMessage({ role: 'assistant', blocks: [] }),
                        buffer: new MagmaAssistantMessage({ role: 'assistant', blocks: [] }),
                        usage: {
                            input_tokens: null,
                            output_tokens: null,
                            cache_write_tokens: null,
                            cache_read_tokens: null,
                        },
                        stop_reason: null,
                    };

                    const choice = chunk.choices[0];
                    const delta = choice?.delta;

                    if (choice?.finish_reason) {
                        stopReason = this.convertStopReason(choice.finish_reason);
                        magmaStreamChunk.stop_reason = stopReason;
                    }

                    // Merge this chunk's tool-call deltas into the accumulator.
                    for (const toolCall of delta?.tool_calls ?? []) {
                        const { index } = toolCall;

                        if (!streamedToolCalls[index]) {
                            streamedToolCalls[index] = toolCall;
                        } else {
                            streamedToolCalls[index].function.arguments +=
                                toolCall.function.arguments;
                        }
                    }

                    // Groq reports usage on the final chunk under x_groq.
                    if (chunk.x_groq?.usage) {
                        usage.input_tokens = chunk.x_groq.usage.prompt_tokens;
                        usage.output_tokens = chunk.x_groq.usage.completion_tokens;
                        usage.cache_write_tokens = 0;
                        usage.cache_read_tokens = 0;
                        magmaStreamChunk.usage = {
                            input_tokens: chunk.x_groq.usage.prompt_tokens,
                            output_tokens: chunk.x_groq.usage.completion_tokens,
                            cache_write_tokens: 0,
                            cache_read_tokens: 0,
                        };
                    }

                    // Delta view: only the fragments carried by THIS chunk.
                    // The id is looked up from the accumulator because
                    // continuation deltas omit it.
                    if (delta?.tool_calls) {
                        const toolCallBlocks: MagmaToolCallBlock[] = delta.tool_calls.map(
                            (toolCall) => ({
                                type: 'tool_call',
                                tool_call: {
                                    id: streamedToolCalls[toolCall.index].id,
                                    fn_name: toolCall.function.name,
                                    fn_args: safeJSON(toolCall.function.arguments) ?? {},
                                    fn_args_buffer: toolCall.function.arguments,
                                },
                            })
                        );
                        magmaStreamChunk.delta.blocks.push(...toolCallBlocks);
                    }

                    if (delta?.content) {
                        const textBlock: MagmaTextBlock = {
                            type: 'text',
                            text: delta.content,
                        };
                        magmaStreamChunk.delta.blocks.push(textBlock);
                        contentBuffer += delta.content;
                    }

                    // Buffer view: everything accumulated so far.
                    if (contentBuffer.length > 0) {
                        const bufferTextBlock: MagmaTextBlock = {
                            type: 'text',
                            text: contentBuffer,
                        };
                        magmaStreamChunk.buffer.blocks.push(bufferTextBlock);
                    }

                    if (Object.keys(streamedToolCalls).length > 0) {
                        const bufferToolCallBlocks: MagmaToolCallBlock[] = Object.values(
                            streamedToolCalls
                        ).map((toolCall) => ({
                            type: 'tool_call',
                            tool_call: {
                                id: toolCall.id,
                                fn_name: toolCall.function.name,
                                // fn_args parses the (possibly incomplete)
                                // argument buffer; falls back to {} until the
                                // JSON becomes valid.
                                fn_args: safeJSON(toolCall.function.arguments) ?? {},
                                fn_args_buffer: toolCall.function.arguments,
                            },
                        }));
                        magmaStreamChunk.buffer.blocks.push(...bufferToolCallBlocks);
                    }

                    onStreamChunk?.(magmaStreamChunk);
                }

                // Stream exhausted — assemble the final message from the
                // accumulated text and tool calls.
                let magmaMessage = new MagmaMessage({ role: 'assistant', blocks: [] });

                if (contentBuffer.length > 0) {
                    magmaMessage.blocks.push({
                        type: 'text',
                        text: contentBuffer,
                    });
                }

                const toolCalls = Object.values(streamedToolCalls);
                if (toolCalls.length > 0) {
                    const toolCallBlocks: MagmaToolCallBlock[] = toolCalls.map((toolCall) => ({
                        type: 'tool_call',
                        tool_call: {
                            id: toolCall.id,
                            fn_name: toolCall.function.name,
                            fn_args: safeJSON(toolCall.function.arguments) ?? {},
                            fn_args_buffer: toolCall.function.arguments,
                        },
                    }));
                    magmaMessage.blocks.push(...toolCallBlocks);
                }

                const magmaCompletion: MagmaCompletion = {
                    provider: 'groq',
                    model: groqConfig.model,
                    message: magmaMessage,
                    usage,
                    stop_reason: stopReason,
                };

                // A null chunk signals end-of-stream to the consumer.
                onStreamChunk?.(null);

                return magmaCompletion;
            } else {
                const groqCompletion = await groq.chat.completions.create(
                    {
                        ...groqConfig,
                        stream: false,
                    },
                    { signal }
                );

                const choice = groqCompletion.choices[0];
                const groqMessage = choice?.message;

                let magmaMessage = new MagmaMessage({ role: 'assistant', blocks: [] });

                if (groqMessage?.content) {
                    magmaMessage.blocks.push({
                        type: 'text',
                        text: groqMessage.content,
                    });
                }

                if (groqMessage?.tool_calls) {
                    const toolCallBlocks: MagmaToolCallBlock[] = groqMessage.tool_calls.map(
                        (tool_call) => ({
                            type: 'tool_call',
                            tool_call: {
                                id: tool_call.id,
                                fn_name: tool_call.function.name,
                                fn_args: safeJSON(tool_call.function.arguments) ?? {},
                                fn_args_buffer: tool_call.function.arguments,
                            },
                        })
                    );
                    magmaMessage.blocks.push(...toolCallBlocks);
                }

                // No text and no tool calls means an unusable response; dump
                // the raw choice for debugging before failing.
                if (magmaMessage.blocks.length === 0) {
                    console.log(JSON.stringify(groqCompletion.choices[0], null, 2));
                    throw new Error('Groq completion was null');
                }

                const magmaCompletion: MagmaCompletion = {
                    provider: 'groq',
                    model: groqConfig.model,
                    message: magmaMessage,
                    usage: {
                        input_tokens: groqCompletion.usage.prompt_tokens,
                        output_tokens: groqCompletion.usage.completion_tokens,
                        cache_write_tokens: 0,
                        cache_read_tokens: 0,
                    },
                    stop_reason: this.convertStopReason(choice?.finish_reason),
                };

                return magmaCompletion;
            }
        } catch (error) {
            // Abort is not an error condition — surface it as a null result.
            if (signal?.aborted) {
                return null;
            }
            // Rate limited: exponential backoff (2^attempt seconds, capped at
            // 60s), then retry recursively up to MAX_RETRIES.
            if (error.response && error.response.status === 429) {
                if (attempt >= MAX_RETRIES) {
                    throw new Error(`Rate limited after ${MAX_RETRIES} attempts`);
                }
                const delay = Math.min(Math.pow(2, attempt) * 1000, 60000);
                agent.log(`Rate limited. Retrying after ${delay}ms.`);

                await sleep(delay);
                return this.makeCompletionRequest({
                    config,
                    onStreamChunk,
                    attempt: attempt + 1,
                    signal,
                    agent,
                });
            } else {
                throw error;
            }
        }
    }

    /**
     * Convert MagmaTool definitions into Groq function-tool declarations.
     * Returns undefined (omit the field) when there are no tools.
     */
    static override convertTools(tools: MagmaTool[]): GroqTool[] | undefined {
        if (tools.length === 0) return undefined;

        const groqTools: GroqTool[] = [];

        for (const tool of tools) {
            // Wrap the flat param list in a root object schema, as required
            // by the function-calling JSON-schema format.
            const baseObject: MagmaToolParam = {
                type: 'object',
                properties: tool.params,
            };

            groqTools.push({
                function: {
                    name: tool.name,
                    description: tool.description,
                    parameters: cleanParam(baseObject, []),
                },
                type: 'function',
            });
        }

        return groqTools;
    }

    // MagmaMessage to Provider-specific message converter
    //
    // NOTE(review): mutates the input messages (deletes `id`). Assistant
    // messages may fan out into two entries (text, then tool_calls); user
    // messages may fan out into tool-result entries plus a content entry.
    static override convertMessages(messages: MagmaMessage[]): GroqMessageParam[] {
        const groqMessages: GroqMessageParam[] = [];

        for (const message of messages) {
            // Groq does not accept a Magma-internal id field.
            if ('id' in message) delete message.id;

            switch (message.role) {
                case 'system':
                    groqMessages.push({
                        role: 'system',
                        content: message.getText(),
                    });
                    break;
                case 'assistant':
                    const reasoning = message.getReasoning();
                    const assistantText = message.getText();
                    const toolCalls = message.getToolCalls();

                    // Reasoning is prepended to the visible text.
                    // NOTE(review): delimiting markup around the reasoning may
                    // have been stripped from this copy — confirm upstream.
                    let textWithReasoning = '';
                    if (reasoning.length > 0)
                        textWithReasoning += `${reasoning}\n`;
                    if (assistantText.length > 0) textWithReasoning += `${assistantText}`;

                    if (textWithReasoning.length > 0) {
                        groqMessages.push({
                            role: 'assistant',
                            content: textWithReasoning,
                        });
                    }

                    if (toolCalls.length > 0) {
                        groqMessages.push({
                            role: 'assistant',
                            tool_calls: toolCalls.map((toolCall) => ({
                                type: 'function',
                                id: toolCall.id,
                                function: {
                                    name: toolCall.fn_name,
                                    arguments: JSON.stringify(toolCall.fn_args),
                                },
                            })),
                        });
                    }
                    break;
                case 'user':
                    const userText = message.getText();
                    const images = message.getImages();
                    const toolResults = message.getToolResults();

                    const content: ChatCompletionUserMessageParam['content'] = [];

                    if (userText.length > 0) {
                        content.push({ type: 'text', text: userText });
                    }

                    for (const image of images) {
                        // If image is a string, it is a url
                        if (image.type === 'image/url') {
                            content.push({
                                type: 'image_url',
                                image_url: {
                                    url: image.data,
                                },
                            });
                        } else {
                            // Otherwise `data` is base64 — embed as data URI.
                            content.push({
                                type: 'image_url',
                                image_url: {
                                    url: `data:${image.type};base64,${image.data}`,
                                },
                            });
                        }
                    }

                    // Tool results become their own 'tool' role entries,
                    // emitted BEFORE the user content entry.
                    if (toolResults.length > 0) {
                        for (const toolResult of toolResults) {
                            groqMessages.push({
                                role: 'tool',
                                tool_call_id: toolResult.id,
                                content: toolResult.error
                                    ? `Something went wrong calling your last tool - \n ${typeof toolResult.result !== 'string' ? JSON.stringify(toolResult.result) : toolResult.result}`
                                    : typeof toolResult.result !== 'string'
                                      ? JSON.stringify(toolResult.result)
                                      : toolResult.result,
                            });
                        }
                    }

                    if (content.length > 0) {
                        groqMessages.push({
                            role: 'user',
                            content,
                        });
                    }
                    break;
            }
        }

        return groqMessages;
    }

    /**
     * Map Groq finish reasons onto Magma stop reasons.
     * NOTE(review): unlike OpenAIProvider, 'content_filter' is not handled
     * here and falls through to 'unknown' — confirm whether intentional.
     */
    static override convertStopReason(
        stop_reason: ChatCompletion.Choice['finish_reason']
    ): MagmaCompletionStopReason {
        switch (stop_reason) {
            case 'stop':
                return 'natural';
            case 'tool_calls':
            case 'function_call':
                return 'tool_call';
            case 'length':
                return 'max_tokens';
            default:
                return 'unknown';
        }
    }
}
451 |
--------------------------------------------------------------------------------
/src/providers/index.ts:
--------------------------------------------------------------------------------
1 | import type { MagmaAgent } from '../agent';
2 | import {
3 | MagmaCompletion,
4 | MagmaCompletionConfig,
5 | MagmaCompletionStopReason,
6 | MagmaMessage,
7 | MagmaProvider,
8 | MagmaStreamChunk,
9 | MagmaTool,
10 | } from '../types';
11 | import dotenv from 'dotenv';
12 |
13 | dotenv.config();
14 |
15 | interface ProviderProps {
16 | name: MagmaProvider;
17 | }
18 |
19 | export const MAX_RETRIES = 5;
20 |
21 | export abstract class Provider implements ProviderProps {
22 | name: MagmaProvider;
23 |
24 | constructor(props: ProviderProps) {
25 | this.name = props.name;
26 | }
27 |
28 | public static factory(name: MagmaProvider): typeof Provider {
29 | const { AnthropicProvider } = require('./anthropic');
30 | const { GroqProvider } = require('./groq');
31 | const { OpenAIProvider } = require('./openai');
32 | const { GoogleProvider } = require('./google');
33 | switch (name) {
34 | case 'anthropic':
35 | return AnthropicProvider;
36 | case 'openai':
37 | return OpenAIProvider;
38 | case 'groq':
39 | return GroqProvider;
40 | case 'google':
41 | return GoogleProvider;
42 | default:
43 | throw new Error(`Can not create factory class Provider with type ${name}`);
44 | }
45 | }
46 |
47 | static convertMessages(messages: MagmaMessage[]): object[] {
48 | messages;
49 | throw new Error('Provider.convertMessages not implemented');
50 | }
51 |
52 | static async makeCompletionRequest({
53 | config,
54 | onStreamChunk,
55 | attempt = 0,
56 | signal,
57 | agent,
58 | }: {
59 | config: MagmaCompletionConfig;
60 | onStreamChunk?: (chunk: MagmaStreamChunk | null) => Promise;
61 | attempt: number;
62 | signal?: AbortSignal;
63 | agent: MagmaAgent;
64 | }): Promise {
65 | config;
66 | onStreamChunk;
67 | attempt;
68 | signal;
69 | throw new Error('Provider.makeCompletionRequest not implemented');
70 | }
71 |
72 | static convertTools(tools: MagmaTool[]): object[] {
73 | tools;
74 | throw new Error('Provider.convertTools not implemented');
75 | }
76 |
77 | static convertConfig(config: MagmaCompletionConfig): object {
78 | config;
79 | throw new Error('Provider.convertConfig not implemented');
80 | }
81 |
82 | static convertStopReason(stop_reason: string): MagmaCompletionStopReason {
83 | stop_reason;
84 | throw new Error('Provider.convertStopReason not implemented');
85 | }
86 | }
87 |
--------------------------------------------------------------------------------
/src/providers/openai.ts:
--------------------------------------------------------------------------------
1 | import OpenAI from 'openai';
2 | import { MAX_RETRIES, Provider } from '.';
3 | import {
4 | MagmaAssistantMessage,
5 | MagmaCompletion,
6 | MagmaCompletionConfig,
7 | MagmaCompletionStopReason,
8 | MagmaMessage,
9 | MagmaStreamChunk,
10 | MagmaTextBlock,
11 | MagmaTool,
12 | MagmaToolCallBlock,
13 | MagmaToolParam,
14 | MagmaUsage,
15 | OpenAIProviderConfig,
16 | } from '../types';
17 | import {
18 | ChatCompletionMessageParam as OpenAIMessageParam,
19 | ChatCompletionTool as OpenAITool,
20 | } from 'openai/resources/index';
21 | import { cleanParam, sleep } from '../helpers';
22 | import {
23 | ChatCompletion,
24 | ChatCompletionChunk,
25 | ChatCompletionUserMessageParam,
26 | } from 'openai/resources/chat/completions';
27 | import { ChatCompletionCreateParamsBase } from 'openai/resources/chat/completions/completions';
28 | import { safeJSON } from 'openai/core';
29 | import type { MagmaAgent } from '../agent';
30 |
/**
 * Provider implementation for the OpenAI chat-completions API.
 *
 * Mirrors GroqProvider's structure: converts Magma config/messages/tools to
 * OpenAI SDK shapes, runs (optionally streaming) requests with 429 retry
 * backoff, and converts responses back into Magma types.
 *
 * NOTE(review): several generic type arguments in this copy appear to have
 * been stripped by extraction (e.g. bare `Promise` below was presumably
 * `Promise<MagmaCompletion | null>`) — confirm against upstream.
 */
export class OpenAIProvider extends Provider {
    /**
     * Execute a completion request against OpenAI.
     *
     * Streaming mode requests usage via `stream_options.include_usage`,
     * accumulates text/tool-call deltas, and emits a MagmaStreamChunk per
     * SDK chunk (delta = this chunk, buffer = everything so far). Retries
     * with exponential backoff (capped at 60s) on HTTP 429 up to MAX_RETRIES.
     * Returns null when aborted via `signal`.
     *
     * NOTE(review): `onStreamChunk` returns a Promise but is never awaited —
     * chunk handlers run fire-and-forget; confirm intended.
     */
    static override async makeCompletionRequest({
        config,
        onStreamChunk,
        attempt = 0,
        signal,
        agent,
    }: {
        config: MagmaCompletionConfig;
        onStreamChunk?: (chunk: MagmaStreamChunk | null) => Promise;
        attempt: number;
        signal?: AbortSignal;
        agent: MagmaAgent;
    }): Promise {
        try {
            const openai = config.providerConfig.client as OpenAI;
            if (!openai) throw new Error('OpenAI instance not configured');

            const openAIConfig = this.convertConfig(config);

            if (config.stream) {
                const stream = await openai.chat.completions.create(
                    {
                        ...openAIConfig,
                        stream: true,
                        // Ask OpenAI to include token usage on the final chunk.
                        stream_options: { include_usage: true },
                    },
                    { signal }
                );

                // Running concatenation of all text deltas seen so far.
                let contentBuffer = '';
                const usage: MagmaUsage = {
                    input_tokens: 0,
                    output_tokens: 0,
                    cache_write_tokens: 0,
                    cache_read_tokens: 0,
                };

                // Tool calls arrive as partial deltas keyed by index; the
                // first delta carries id/name, later ones append argument
                // fragments.
                let streamedToolCalls: {
                    [index: number]: ChatCompletionChunk.Choice.Delta.ToolCall;
                } = {};

                let stopReason: MagmaCompletionStopReason = null;

                for await (const chunk of stream) {
                    let magmaStreamChunk: MagmaStreamChunk = {
                        id: chunk.id,
                        provider: 'openai',
                        model: chunk.model,
                        delta: new MagmaAssistantMessage({ role: 'assistant', blocks: [] }),
                        buffer: new MagmaAssistantMessage({ role: 'assistant', blocks: [] }),
                        usage: {
                            input_tokens: null,
                            output_tokens: null,
                            cache_write_tokens: null,
                            cache_read_tokens: null,
                        },
                        stop_reason: null,
                    };

                    const choice = chunk.choices[0];
                    const delta = choice?.delta;

                    if (choice?.finish_reason) {
                        stopReason = this.convertStopReason(choice.finish_reason);
                        magmaStreamChunk.stop_reason = stopReason;
                    }

                    // Merge this chunk's tool-call deltas into the accumulator.
                    for (const toolCall of delta?.tool_calls ?? []) {
                        const { index } = toolCall;

                        if (!streamedToolCalls[index]) {
                            streamedToolCalls[index] = toolCall;
                        } else {
                            streamedToolCalls[index].function.arguments +=
                                toolCall.function.arguments;
                        }
                    }

                    if (chunk.usage) {
                        // Cached prompt tokens are reported inside
                        // prompt_tokens; subtract them so input_tokens counts
                        // only uncached input.
                        // NOTE(review): cached tokens are recorded here as
                        // cache_WRITE, while the non-streaming path below
                        // records them as cache_READ — confirm which is right.
                        usage.input_tokens =
                            chunk.usage.prompt_tokens -
                            (chunk.usage.prompt_tokens_details?.cached_tokens ?? 0);
                        usage.output_tokens = chunk.usage.completion_tokens;
                        usage.cache_write_tokens =
                            chunk.usage.prompt_tokens_details?.cached_tokens ?? 0;
                        usage.cache_read_tokens = 0;
                        magmaStreamChunk.usage = {
                            input_tokens: chunk.usage.prompt_tokens,
                            output_tokens: chunk.usage.completion_tokens,
                            cache_write_tokens:
                                chunk.usage.prompt_tokens_details?.cached_tokens ?? 0,
                            cache_read_tokens: 0,
                        };
                    }

                    // Delta view: only fragments carried by THIS chunk. The id
                    // is looked up from the accumulator because continuation
                    // deltas omit it.
                    if (delta?.tool_calls) {
                        const toolCallBlocks: MagmaToolCallBlock[] = delta.tool_calls.map(
                            (toolCall) => ({
                                type: 'tool_call',
                                tool_call: {
                                    id: streamedToolCalls[toolCall.index].id,
                                    fn_name: toolCall.function.name,
                                    fn_args: safeJSON(toolCall.function.arguments) ?? {},
                                    fn_args_buffer: toolCall.function.arguments,
                                },
                            })
                        );
                        magmaStreamChunk.delta.blocks.push(...toolCallBlocks);
                    }

                    if (delta?.content) {
                        const textBlock: MagmaTextBlock = {
                            type: 'text',
                            text: delta.content,
                        };
                        magmaStreamChunk.delta.blocks.push(textBlock);
                        contentBuffer += delta.content;
                    }

                    // Buffer view: everything accumulated so far.
                    if (contentBuffer.length > 0) {
                        const bufferTextBlock: MagmaTextBlock = {
                            type: 'text',
                            text: contentBuffer,
                        };
                        magmaStreamChunk.buffer.blocks.push(bufferTextBlock);
                    }

                    if (Object.keys(streamedToolCalls).length > 0) {
                        const bufferToolCallBlocks: MagmaToolCallBlock[] = Object.values(
                            streamedToolCalls
                        ).map((toolCall) => ({
                            type: 'tool_call',
                            tool_call: {
                                id: toolCall.id,
                                fn_name: toolCall.function.name,
                                // Parses the (possibly incomplete) argument
                                // buffer; {} until the JSON becomes valid.
                                fn_args: safeJSON(toolCall.function.arguments) ?? {},
                                fn_args_buffer: toolCall.function.arguments,
                            },
                        }));
                        magmaStreamChunk.buffer.blocks.push(...bufferToolCallBlocks);
                    }

                    onStreamChunk?.(magmaStreamChunk);
                }

                // Stream exhausted — assemble the final message.
                let magmaMessage = new MagmaMessage({ role: 'assistant', blocks: [] });

                if (contentBuffer.length > 0) {
                    magmaMessage.blocks.push({
                        type: 'text',
                        text: contentBuffer,
                    });
                }

                const toolCalls = Object.values(streamedToolCalls);
                if (toolCalls.length > 0) {
                    const toolCallBlocks: MagmaToolCallBlock[] = toolCalls.map((toolCall) => ({
                        type: 'tool_call',
                        tool_call: {
                            id: toolCall.id,
                            fn_name: toolCall.function.name,
                            fn_args: safeJSON(toolCall.function.arguments) ?? {},
                            fn_args_buffer: toolCall.function.arguments,
                        },
                    }));
                    magmaMessage.blocks.push(...toolCallBlocks);
                }

                const magmaCompletion: MagmaCompletion = {
                    provider: 'openai',
                    model: openAIConfig.model,
                    message: magmaMessage,
                    usage,
                    stop_reason: stopReason,
                };

                // A null chunk signals end-of-stream to the consumer.
                onStreamChunk?.(null);

                return magmaCompletion;
            } else {
                const openAICompletion = await openai.chat.completions.create(
                    {
                        ...openAIConfig,
                        stream: false,
                    },
                    { signal }
                );

                const choice = openAICompletion.choices[0];
                const openAIMessage = choice?.message;

                let magmaMessage = new MagmaMessage({ role: 'assistant', blocks: [] });

                if (openAIMessage?.content) {
                    magmaMessage.blocks.push({
                        type: 'text',
                        text: openAIMessage.content,
                    });
                }

                if (openAIMessage?.tool_calls) {
                    const toolCallBlocks: MagmaToolCallBlock[] = openAIMessage.tool_calls.map(
                        (tool_call) => ({
                            type: 'tool_call',
                            tool_call: {
                                id: tool_call.id,
                                fn_name: tool_call.function.name,
                                fn_args: safeJSON(tool_call.function.arguments) ?? {},
                                fn_args_buffer: tool_call.function.arguments,
                            },
                        })
                    );
                    magmaMessage.blocks.push(...toolCallBlocks);
                }

                // No text and no tool calls means an unusable response; dump
                // the raw choice for debugging before failing.
                if (magmaMessage.blocks.length === 0) {
                    console.log(JSON.stringify(openAICompletion.choices[0], null, 2));
                    throw new Error('OpenAI completion was null');
                }

                const magmaCompletion: MagmaCompletion = {
                    provider: 'openai',
                    model: openAICompletion.model,
                    message: magmaMessage,
                    usage: {
                        // Uncached input only; cached prompt tokens are
                        // reported separately as cache_read_tokens here.
                        input_tokens:
                            openAICompletion.usage.prompt_tokens -
                            (openAICompletion.usage.prompt_tokens_details?.cached_tokens ?? 0),
                        output_tokens: openAICompletion.usage.completion_tokens,
                        cache_write_tokens: 0,
                        cache_read_tokens:
                            openAICompletion.usage.prompt_tokens_details?.cached_tokens ?? 0,
                    },
                    stop_reason: this.convertStopReason(choice?.finish_reason),
                };

                return magmaCompletion;
            }
        } catch (error) {
            // Abort is not an error condition — surface it as a null result.
            if (signal?.aborted) {
                return null;
            }
            // Rate limited: exponential backoff (2^attempt seconds, capped
            // at 60s), retry recursively up to MAX_RETRIES.
            if (error.response && error.response.status === 429) {
                if (attempt >= MAX_RETRIES) {
                    throw new Error(`Rate limited after ${MAX_RETRIES} attempts`);
                }
                const delay = Math.min(Math.pow(2, attempt) * 1000, 60000);
                agent.log(`Rate limited. Retrying after ${delay}ms.`);

                await sleep(delay);
                return this.makeCompletionRequest({
                    config,
                    onStreamChunk,
                    attempt: attempt + 1,
                    signal,
                    agent,
                });
            } else {
                throw error;
            }
        }
    }

    // Tool schema to LLM function call converter
    // Returns undefined (omit the field) when there are no tools.
    static override convertTools(tools: MagmaTool[]): OpenAITool[] | undefined {
        if (tools.length === 0) return undefined;
        const openAITools: OpenAITool[] = [];

        for (const tool of tools) {
            // Wrap the flat param list in a root object schema, as required
            // by the function-calling JSON-schema format.
            const baseObject: MagmaToolParam = {
                type: 'object',
                properties: tool.params,
            };

            openAITools.push({
                function: {
                    name: tool.name,
                    description: tool.description,
                    parameters: cleanParam(baseObject, []),
                },
                type: 'function',
            });
        }

        return openAITools;
    }

    // MagmaConfig to Provider-specific config converter
    //
    // NOTE(review): mutates its argument (`delete config.providerConfig`)
    // before spreading it — callers must not reuse `config` afterwards.
    static override convertConfig(config: MagmaCompletionConfig): ChatCompletionCreateParamsBase {
        let tool_choice = undefined;

        // 'auto' / 'required' pass through; any other string forces a
        // specific named function.
        if (config.tool_choice === 'auto') tool_choice = 'auto';
        else if (config.tool_choice === 'required') tool_choice = 'required';
        else if (typeof config.tool_choice === 'string')
            tool_choice = { type: 'function', function: { name: config.tool_choice } };

        const { model, settings } = config.providerConfig as OpenAIProviderConfig;

        // Strip the Magma-only field so it does not leak into the payload.
        delete config.providerConfig;

        const openAIConfig: ChatCompletionCreateParamsBase = {
            ...config,
            model,
            messages: this.convertMessages(config.messages),
            tools: this.convertTools(config.tools),
            tool_choice: tool_choice,
            // Provider-specific settings win over the generic fields above.
            ...settings,
        };

        return openAIConfig;
    }

    // MagmaMessage to Provider-specific message converter
    //
    // NOTE(review): mutates the input messages (deletes `id`).
    static override convertMessages(messages: MagmaMessage[]): OpenAIMessageParam[] {
        const openAIMessages: OpenAIMessageParam[] = [];

        for (const message of messages) {
            // OpenAI does not accept a Magma-internal id field.
            if ('id' in message) delete message.id;

            switch (message.role) {
                case 'system':
                    // One text part per block, carrying an Anthropic-style
                    // cache_control marker when the block is cacheable.
                    // NOTE(review): every mapped part uses message.getText()
                    // (the FULL concatenated text), not the block's own text —
                    // multi-block system messages duplicate content. Looks
                    // like a bug; confirm intent before relying on it.
                    openAIMessages.push({
                        role: 'system',
                        content: message.blocks.map(
                            (b) =>
                                ({
                                    type: 'text',
                                    text: message.getText(),
                                    cache_control: b.cache ? { type: 'ephemeral' } : undefined,
                                }) as any
                        ),
                    });
                    break;
                case 'assistant':
                    const reasoning = message.getReasoning();
                    const assistantText = message.getText();
                    const toolCalls = message.getToolCalls();

                    // Reasoning is prepended to the visible text.
                    // NOTE(review): delimiting markup around the reasoning may
                    // have been stripped from this copy — confirm upstream.
                    let textWithReasoning = '';
                    if (reasoning.length > 0)
                        textWithReasoning += `${reasoning}\n`;
                    if (assistantText.length > 0) textWithReasoning += `${assistantText}`;

                    if (textWithReasoning.length > 0) {
                        openAIMessages.push({
                            role: 'assistant',
                            content: textWithReasoning,
                        });
                    }

                    if (toolCalls.length > 0) {
                        openAIMessages.push({
                            role: 'assistant',
                            tool_calls: toolCalls.map((toolCall) => ({
                                type: 'function',
                                id: toolCall.id,
                                function: {
                                    name: toolCall.fn_name,
                                    arguments: JSON.stringify(toolCall.fn_args),
                                },
                            })),
                        });
                    }
                    break;
                case 'user':
                    const userText = message.getText();
                    const images = message.getImages();
                    const toolResults = message.getToolResults();

                    const content: ChatCompletionUserMessageParam['content'] = [];

                    if (userText.length > 0) {
                        content.push({ type: 'text', text: userText });
                    }

                    for (const image of images) {
                        // If image is a string, it is a url
                        if (image.type === 'image/url') {
                            content.push({
                                type: 'image_url',
                                image_url: {
                                    url: image.data,
                                },
                            });
                        } else {
                            // Otherwise `data` is base64 — embed as data URI.
                            content.push({
                                type: 'image_url',
                                image_url: {
                                    url: `data:${image.type};base64,${image.data}`,
                                },
                            });
                        }
                    }

                    // Tool results become their own 'tool' role entries,
                    // emitted BEFORE the user content entry.
                    if (toolResults.length > 0) {
                        for (const toolResult of toolResults) {
                            openAIMessages.push({
                                role: 'tool',
                                tool_call_id: toolResult.id,
                                content: toolResult.error
                                    ? `Something went wrong calling your last tool - \n ${typeof toolResult.result !== 'string' ? JSON.stringify(toolResult.result) : toolResult.result}`
                                    : typeof toolResult.result !== 'string'
                                      ? JSON.stringify(toolResult.result)
                                      : toolResult.result,
                            });
                        }
                    }

                    if (content.length > 0) {
                        openAIMessages.push({
                            role: 'user',
                            content,
                        });
                    }
                    break;
            }
        }

        return openAIMessages;
    }

    /** Map OpenAI finish reasons onto Magma stop reasons. */
    static override convertStopReason(
        stop_reason: ChatCompletion.Choice['finish_reason']
    ): MagmaCompletionStopReason {
        switch (stop_reason) {
            case 'stop':
                return 'natural';
            case 'tool_calls':
            case 'function_call':
                return 'tool_call';
            case 'content_filter':
                return 'content_filter';
            case 'length':
                return 'max_tokens';
            default:
                return 'unknown';
        }
    }
}
471 |
--------------------------------------------------------------------------------
/src/types/agent.ts:
--------------------------------------------------------------------------------
1 | import { MagmaProviderConfig } from './providers';
2 | import { MagmaMessage } from './messages';
3 | import { MagmaTool } from './utilities/tools';
4 |
/**
 * Provider-agnostic request configuration passed to
 * Provider.makeCompletionRequest / convertConfig.
 *
 * `tool_choice` accepts 'auto', 'required', or a specific tool name — the
 * `(string & {})` trick preserves literal autocomplete while still allowing
 * any string.
 */
export type MagmaCompletionConfig = {
    providerConfig: MagmaProviderConfig;
    messages: MagmaMessage[];
    tools: MagmaTool[];
    tool_choice?: 'auto' | 'required' | (string & {});
    stream?: boolean;
};
12 |
--------------------------------------------------------------------------------
/src/types/index.ts:
--------------------------------------------------------------------------------
// Barrel file: re-export all public Magma type modules from one entry point.
export * from './agent';
export * from './messages';
export * from './providers';
export * from './utilities';
5 |
--------------------------------------------------------------------------------
/src/types/messages.ts:
--------------------------------------------------------------------------------
1 | import { MagmaProvider } from './providers';
2 | import { MagmaToolReturnType } from './utilities';
3 |
/** Why a completion ended, normalized across providers. */
export type MagmaCompletionStopReason =
    | 'natural'
    | 'tool_call'
    | 'content_filter'
    | 'max_tokens'
    | 'unsupported'
    | 'unknown';

/** Provider-agnostic result of a single completion request. */
export type MagmaCompletion = {
    message: MagmaMessage;
    provider: MagmaProvider;
    model: string;
    usage: MagmaUsage;
    stop_reason: MagmaCompletionStopReason;
};

/** Token accounting for one completion. */
export type MagmaUsage = {
    input_tokens: number;
    output_tokens: number;
    cache_write_tokens: number;
    cache_read_tokens: number;
};

/**
 * Supported image encodings. 'image/url' means `data` holds a URL; for the
 * MIME types, providers treat `data` as base64 (see the provider converters).
 */
export type MagmaImageType = 'image/png' | 'image/jpeg' | 'image/gif' | 'image/webp' | 'image/url';

export type MagmaImage = {
    data: string;
    type: MagmaImageType;
};

// Provider-agnostic message type
export type MagmaMessageType =
    | MagmaSystemMessageType
    | MagmaAssistantMessageType
    | MagmaUserMessageType;

/**
 * Constructor shape for system messages. `content` and `blocks` are mutually
 * exclusive (enforced by the MagmaMessage constructor); `cache` is a default
 * applied to blocks that do not set their own cache flag.
 */
export type MagmaSystemMessageType = {
    id?: string | number;
    role: 'system';
    blocks?: MagmaContentBlock[];
    content?: string;
    cache?: boolean;
};

export type MagmaTextBlock = {
    type: 'text';
    text: string;
};

export type MagmaToolCallBlock = {
    type: 'tool_call';
    tool_call: MagmaToolCall;
};

export type MagmaToolResultBlock = {
    type: 'tool_result';
    tool_result: MagmaToolResult;
};

/** `redacted` reasoning is excluded from MagmaMessage.getReasoning(). */
export type MagmaReasoningBlock = {
    type: 'reasoning';
    reasoning: string;
    redacted?: true;
    signature?: string;
};

export type MagmaImageBlock = {
    type: 'image';
    image: MagmaImage;
};

/** Discriminated union of message content, tagged by `type`; `cache` opts a block into provider prompt caching. */
export type MagmaContentBlock = (
    | MagmaTextBlock
    | MagmaToolCallBlock
    | MagmaToolResultBlock
    | MagmaReasoningBlock
    | MagmaImageBlock
) & {
    cache?: boolean;
};

type MagmaUserMessageType = {
    id?: string | number;
    role: 'user';
    blocks?: MagmaContentBlock[];
    content?: string;
};

type MagmaAssistantMessageType = {
    id?: string | number;
    role: 'assistant';
    blocks?: MagmaContentBlock[];
    content?: string;
};

// Provider-agnostic tool/function type
export type MagmaToolCall = {
    id: string;
    fn_name: string;
    // NOTE(review): generic arguments appear stripped in this copy — likely
    // Record<string, any>; confirm against upstream before compiling.
    fn_args: Record;
    // Raw (possibly partial) JSON argument string accumulated while streaming.
    fn_args_buffer?: string;
    error?: string;
};

export type MagmaToolResult = {
    id: string;
    result: MagmaToolReturnType;
    // True when the tool invocation failed; providers wrap the result in an
    // error preamble in that case.
    error?: boolean;
    fn_name: string;
    call: MagmaToolCall;
};

/**
 * One chunk of a streamed completion. `delta` holds only this chunk's
 * content; `buffer` holds everything accumulated so far. Usage fields are
 * null until the provider reports them (typically on the final chunk).
 */
export type MagmaStreamChunk = {
    id: string;
    provider: MagmaProvider;
    model: string;
    delta: MagmaAssistantMessage;
    buffer: MagmaAssistantMessage;
    stop_reason: MagmaCompletionStopReason;
    usage: {
        input_tokens: number | null;
        output_tokens: number | null;
        cache_write_tokens: number | null;
        cache_read_tokens: number | null;
    };
};
130 |
131 | export class MagmaMessage {
132 | id?: string | number;
133 | role: MagmaMessageType['role'];
134 | blocks: MagmaContentBlock[];
135 |
136 | constructor({ role, content, blocks, id }: MagmaMessageType) {
137 | this.id = id;
138 | this.role = role;
139 | if (content && blocks) {
140 | throw new Error('Cannot provide both content and blocks to MagmaMessage constructor');
141 | }
142 |
143 | if (content || content === '') {
144 | this.blocks = [
145 | {
146 | type: 'text',
147 | text: content,
148 | },
149 | ];
150 | } else if (blocks) {
151 | this.blocks = blocks;
152 | }
153 | }
154 |
155 | public getText(): string {
156 | return this.blocks
157 | .filter((block) => block.type === 'text')
158 | .map((block) => block.text)
159 | .join('\n');
160 | }
161 |
162 | public getToolCalls(): MagmaToolCall[] {
163 | return this.blocks
164 | .filter((block) => block.type === 'tool_call')
165 | .map((block) => block.tool_call);
166 | }
167 |
168 | public getToolResults(): MagmaToolResult[] {
169 | return this.blocks
170 | .filter((block) => block.type === 'tool_result')
171 | .map((block) => block.tool_result);
172 | }
173 |
174 | public getReasoning(): string {
175 | return this.blocks
176 | .filter((block) => block.type === 'reasoning' && !block.redacted)
177 | .map((block: MagmaReasoningBlock) => block.reasoning)
178 | .join('\n');
179 | }
180 |
181 | public getImages(): MagmaImage[] {
182 | return this.blocks.filter((block) => block.type === 'image').map((block) => block.image);
183 | }
184 |
185 | public get content(): string {
186 | return this.getText();
187 | }
188 | }
189 |
190 | export class MagmaUserMessage extends MagmaMessage {
191 | role: 'user' = 'user';
192 | constructor(magmaUserMessage: MagmaUserMessageType) {
193 | super(magmaUserMessage);
194 | }
195 | }
196 |
197 | export class MagmaAssistantMessage extends MagmaMessage {
198 | role: 'assistant' = 'assistant';
199 | constructor(magmaAssistantMessage: MagmaAssistantMessageType) {
200 | super(magmaAssistantMessage);
201 | }
202 | }
203 |
204 | export class MagmaSystemMessage extends MagmaMessage {
205 | role: 'system' = 'system';
206 | constructor(magmaSystemMessage: MagmaSystemMessageType) {
207 | super(magmaSystemMessage);
208 | this.blocks.forEach((block) => {
209 | block.cache = block.cache ?? magmaSystemMessage.cache;
210 | });
211 | }
212 | }
213 |
--------------------------------------------------------------------------------
/src/types/providers.ts:
--------------------------------------------------------------------------------
1 | import Anthropic from '@anthropic-ai/sdk';
2 | import { ChatCompletionCreateParamsBase as OpenAISettings } from 'openai/resources/chat/completions/completions';
3 | import {
4 | GoogleGenerativeAI,
5 | ModelParams as GoogleModelParams,
6 | GenerationConfig as GoogleSettings,
7 | } from '@google/generative-ai';
8 | import Groq from 'groq-sdk';
9 | import { ChatCompletionCreateParams as GroqSettings } from 'groq-sdk/resources/chat/completions';
10 | import OpenAI from 'openai';
11 |
// LLM providers supported by Magma; the const array doubles as a runtime list
// and the source of the MagmaProvider union.
export const MagmaProviders = ['openai', 'anthropic', 'groq', 'google'] as const;
export type MagmaProvider = (typeof MagmaProviders)[number];

// Any of the underlying SDK client instances.
export type MagmaClient = OpenAI | Anthropic | Groq | GoogleGenerativeAI;

// Model identifier types, derived from each SDK's own request types so they
// track SDK upgrades automatically.
export type AnthropicModel = Anthropic.Messages.Model;

export type OpenAIModel = OpenAISettings['model'];

export type GroqModel = GroqSettings['model'];

export type GoogleModel = GoogleModelParams['model'];

export type MagmaModel = AnthropicModel | OpenAIModel | GroqModel | GoogleModel;
26 |
27 | type MagmaOpenAISettings = Omit<
28 | OpenAISettings,
29 | 'messages' | 'model' | 'function_call' | 'functions' | 'stream' | 'stream_options' | 'tools'
30 | >;
31 | type MagmaAnthropicSettings = Omit<
32 | Anthropic.Messages.MessageCreateParams,
33 | 'max_tokens' | 'messages' | 'model' | 'stream' | 'tools' | 'system'
34 | > & { max_tokens?: number };
35 | type MagmaGroqSettings = Omit<
36 | GroqSettings,
37 | 'messages' | 'model' | 'function_call' | 'functions' | 'stream' | 'tools'
38 | >;
39 | type MagmaGoogleSettings = Omit;
40 |
// Per-provider configuration: which provider to use, which model, optional
// tuning settings, and an optional pre-constructed SDK client.
// NOTE(review): `client` is typed as `object` rather than the concrete SDK
// client type — presumably to avoid forcing the SDK dependency on callers;
// confirm before tightening.
export type OpenAIProviderConfig = {
    client?: object;
    provider: 'openai';
    model: OpenAIModel;
    settings?: MagmaOpenAISettings;
};

export type AnthropicProviderConfig = {
    client?: object;
    provider: 'anthropic';
    model: AnthropicModel;
    settings?: MagmaAnthropicSettings;
};

export type GroqProviderConfig = {
    client?: object;
    provider: 'groq';
    model: GroqModel;
    settings?: MagmaGroqSettings;
};

export type GoogleProviderConfig = {
    client?: object;
    provider: 'google';
    model: GoogleModel;
    settings?: MagmaGoogleSettings;
};

// Discriminated union on `provider`, so narrowing on it selects the matching
// model/settings types.
export type MagmaProviderConfig =
    | OpenAIProviderConfig
    | AnthropicProviderConfig
    | GroqProviderConfig
    | GoogleProviderConfig;
74 |
--------------------------------------------------------------------------------
/src/types/utilities/hooks.ts:
--------------------------------------------------------------------------------
1 | import { Request, Response } from 'express';
2 | import { MagmaAgent } from '../../agent';
3 |
4 | export type MagmaHook = {
5 | name: string;
6 | handler: (request: Request, response: Response, agent: MagmaAgent) => Promise;
7 | session?: 'default' | (string & {}) | ((req: Request) => string | Promise);
8 | };
9 |
--------------------------------------------------------------------------------
/src/types/utilities/index.ts:
--------------------------------------------------------------------------------
1 | import { MagmaHook } from './hooks';
2 | import { MagmaJob } from './jobs';
3 | import { MagmaMiddleware } from './middleware';
4 | import { MagmaTool } from './tools';
5 |
6 | export * from './hooks';
7 | export * from './jobs';
8 | export * from './middleware';
9 | export * from './tools';
10 |
/**
 * Bundle of agent utilities that can be registered as a unit.
 * Every field is optional; an absent field contributes nothing.
 */
export type MagmaUtilities = {
    tools?: MagmaTool[];
    middleware?: MagmaMiddleware[];
    hooks?: MagmaHook[];
    jobs?: MagmaJob[];
};
17 |
--------------------------------------------------------------------------------
/src/types/utilities/jobs.ts:
--------------------------------------------------------------------------------
1 | import { MagmaAgent } from '../../agent';
2 |
3 | export interface MagmaJob {
4 | handler: (agent: MagmaAgent) => Promise;
5 | schedule: string;
6 | options?: { timezone?: string };
7 | name?: string;
8 | }
9 |
--------------------------------------------------------------------------------
/src/types/utilities/middleware.ts:
--------------------------------------------------------------------------------
1 | import { MagmaAgent } from '../../agent';
2 | import { MagmaToolCall, MagmaToolResult } from '../index';
3 |
// Lifecycle points at which middleware may run; the const array doubles as a
// runtime list and the source of the MagmaMiddlewareTriggerType union.
export const MagmaMiddlewareTriggers = [
    'onCompletion',
    'preCompletion',
    'onToolExecution',
    'preToolExecution',
    'onMainFinish',
    'postProcess',
] as const;

export type MagmaMiddlewareTriggerType = (typeof MagmaMiddlewareTriggers)[number];
14 |
15 | export type MagmaMiddlewareReturnType =
16 | T extends 'preCompletion'
17 | ? string | void
18 | : T extends 'onCompletion'
19 | ? string | void
20 | : T extends 'preToolExecution'
21 | ? MagmaToolCall | void
22 | : T extends 'onToolExecution'
23 | ? MagmaToolResult | void
24 | : T extends 'onMainFinish'
25 | ? string | void
26 | : T extends 'postProcess'
27 | ? string | void
28 | : never;
29 |
30 | export type MagmaMiddlewareParamType =
31 | T extends 'preToolExecution'
32 | ? MagmaToolCall
33 | : T extends 'onToolExecution'
34 | ? MagmaToolResult
35 | : T extends 'preCompletion'
36 | ? string
37 | : T extends 'onCompletion'
38 | ? string
39 | : T extends 'onMainFinish'
40 | ? string
41 | : T extends 'postProcess'
42 | ? string
43 | : never;
44 |
45 | export type MagmaMiddleware = {
46 | trigger: MagmaMiddlewareTriggerType;
47 | action: (
48 | message: MagmaMiddlewareParamType,
49 | agent: MagmaAgent
50 | ) =>
51 | | Promise>
52 | | MagmaMiddlewareReturnType;
53 | name?: string;
54 | critical?: boolean;
55 | order?: number;
56 | };
57 |
--------------------------------------------------------------------------------
/src/types/utilities/tools.ts:
--------------------------------------------------------------------------------
1 | import { MagmaAgent } from '../../agent';
2 | import { MagmaToolCall } from '../messages';
3 |
// JSON-schema-like parameter model for tool definitions sent to the LLM.
export type MagmaToolParamType = 'string' | 'number' | 'object' | 'boolean' | 'array';

// Object parameter: named properties, each optionally required.
export type MagmaToolObjectParam = {
    type: 'object';
    description?: string;
    properties: (MagmaToolParam & { key: string; required?: boolean })[];
};

// Array parameter: homogeneous items, optionally capped at `limit` elements.
export type MagmaToolArrayParam = {
    type: 'array';
    description?: string;
    items: MagmaToolParam;
    limit?: number;
};

// String parameter, optionally restricted to an enum of allowed values.
export type MagmaToolStringParam = {
    type: 'string';
    description?: string;
    enum?: string[];
};

// Number parameter, optionally restricted to an enum of allowed values.
export type MagmaToolNumberParam = {
    type: 'number';
    description?: string;
    enum?: number[];
};

export type MagmaToolBooleanParam = {
    type: 'boolean';
    description?: string;
};

// Discriminated union on `type` covering every parameter kind.
export type MagmaToolParam =
    | MagmaToolObjectParam
    | MagmaToolArrayParam
    | MagmaToolStringParam
    | MagmaToolNumberParam
    | MagmaToolBooleanParam;
42 |
43 | export type MagmaToolReturnType = string | Record;
44 |
45 | // Target in-code function that a MagmaTool maps to
46 | export type MagmaToolTarget = (
47 | call: MagmaToolCall,
48 | agent: MagmaAgent
49 | ) => MagmaToolReturnType | Promise;
50 | // Tool type containing the json schema sent to the LLM and the target to be called with the generated args
51 | export type MagmaTool = {
52 | name: string;
53 | description: string;
54 | params: (MagmaToolParam & { key: string; required?: boolean })[];
55 | target: MagmaToolTarget;
56 | enabled?: (agent: MagmaAgent) => boolean;
57 | cache?: boolean;
58 | };
59 |
--------------------------------------------------------------------------------
/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "include": ["src"],
3 | "exclude": ["node_modules/**/*.ts"],
4 | "compilerOptions": {
5 | "declaration": true,
6 | "declarationMap": true,
7 | "module": "CommonJS",
8 | "outDir": "dist/",
9 | "sourceMap": true,
10 | "target": "ES2015",
11 |
12 | "strict": false,
13 |
14 | "esModuleInterop": true,
15 | "moduleResolution": "node",
16 |
17 | "forceConsistentCasingInFileNames": true,
18 | "allowSyntheticDefaultImports": true,
19 |
20 | "experimentalDecorators": true,
21 | "emitDecoratorMetadata": true,
22 |
23 | "skipLibCheck": true
24 | },
25 | }
--------------------------------------------------------------------------------
/tsup.config.ts:
--------------------------------------------------------------------------------
1 | import { defineConfig } from 'tsup';
2 |
3 | export default defineConfig({
4 | entry: {
5 | 'index': 'src/index.ts',
6 | 'decorators': 'src/decorators.ts',
7 | 'types/index': 'src/types/index.ts',
8 | },
9 | format: ['cjs', 'esm'],
10 | dts: true,
11 | clean: true,
12 | splitting: false,
13 | treeshake: true,
14 | external: [
15 | 'ws',
16 | 'openai',
17 | '@anthropic-ai/sdk',
18 | '@google/generative-ai',
19 | 'groq-sdk',
20 | ],
21 | });
--------------------------------------------------------------------------------