├── .github
├── CODEOWNERS
└── workflows
│ ├── ci.yml
│ └── publish.yml
├── .gitignore
├── .husky
└── pre-commit
├── .prettierignore
├── .prettierrc
├── Dockerfile
├── LICENSE
├── README.md
├── eslint.config.js
├── jest.config.ts
├── package.json
├── pnpm-lock.yaml
├── pnpm-workspace.yaml
├── smithery.yaml
├── src
├── index.ts
├── tools
│ ├── dashboards
│ │ ├── index.ts
│ │ ├── schema.ts
│ │ └── tool.ts
│ ├── downtimes
│ │ ├── index.ts
│ │ ├── schema.ts
│ │ └── tool.ts
│ ├── hosts
│ │ ├── index.ts
│ │ ├── schema.ts
│ │ └── tool.ts
│ ├── incident
│ │ ├── index.ts
│ │ ├── schema.ts
│ │ └── tool.ts
│ ├── logs
│ │ ├── index.ts
│ │ ├── schema.ts
│ │ └── tool.ts
│ ├── metrics
│ │ ├── index.ts
│ │ ├── schema.ts
│ │ └── tool.ts
│ ├── monitors
│ │ ├── index.ts
│ │ ├── schema.ts
│ │ └── tool.ts
│ ├── rum
│ │ ├── index.ts
│ │ ├── schema.ts
│ │ └── tool.ts
│ └── traces
│ │ ├── index.ts
│ │ ├── schema.ts
│ │ └── tool.ts
└── utils
│ ├── datadog.ts
│ ├── helper.ts
│ ├── tool.ts
│ └── types.ts
├── tests
├── helpers
│ ├── datadog.ts
│ ├── mock.ts
│ └── msw.ts
├── setup.ts
├── tools
│ ├── dashboards.test.ts
│ ├── downtimes.test.ts
│ ├── hosts.test.ts
│ ├── incident.test.ts
│ ├── logs.test.ts
│ ├── metrics.test.ts
│ ├── monitors.test.ts
│ ├── rum.test.ts
│ └── traces.test.ts
└── utils
│ ├── datadog.test.ts
│ └── tool.test.ts
├── tsconfig.json
├── tsup.config.ts
└── vitest.config.ts
/.github/CODEOWNERS:
--------------------------------------------------------------------------------
1 | * @winor30
2 |
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: CI
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 | pull_request:
8 |
9 | jobs:
10 | lint:
11 | runs-on: ubuntu-latest
12 | steps:
13 | - name: Check out repository code
14 | uses: actions/checkout@v4
15 |
16 | - name: Set up Node
17 | uses: actions/setup-node@v4
18 | with:
19 | node-version: 20
20 |
21 | - uses: pnpm/action-setup@v4
22 | with:
23 | version: 10
24 |
25 | - name: Install dependencies
26 | run: pnpm install --frozen-lockfile
27 |
28 | - name: Run ESLint
29 | run: pnpm run lint
30 |
31 | format:
32 | runs-on: ubuntu-latest
33 | steps:
34 | - name: Check out repository code
35 | uses: actions/checkout@v4
36 |
37 | - name: Set up Node
38 | uses: actions/setup-node@v4
39 | with:
40 | node-version: 20
41 |
42 | - uses: pnpm/action-setup@v4
43 | with:
44 | version: 10
45 |
46 | - name: Install dependencies
47 | run: pnpm install --frozen-lockfile
48 |
49 | - name: Check code format with Prettier
50 | run: pnpm exec prettier --check .
51 |
52 | build:
53 | runs-on: ubuntu-latest
54 | steps:
55 | - name: Check out repository code
56 | uses: actions/checkout@v4
57 |
58 | - name: Set up Node
59 | uses: actions/setup-node@v4
60 | with:
61 | node-version: 20
62 |
63 | - uses: pnpm/action-setup@v4
64 | with:
65 | version: 10
66 |
67 | - name: Install dependencies
68 | run: pnpm install --frozen-lockfile
69 |
70 | - name: Build
71 | run: pnpm run build
72 |
73 | test:
74 | permissions:
75 | contents: read
76 | pull-requests: write
77 | runs-on: ubuntu-latest
78 | steps:
79 | - name: Checkout
80 | uses: actions/checkout@v4
81 | with:
82 | fetch-depth: 0
83 |
84 | - name: Set up Node
85 | uses: actions/setup-node@v4
86 | with:
87 | node-version: 20
88 |
89 | - uses: pnpm/action-setup@v4
90 | with:
91 | version: 10
92 |
93 | - name: Install dependencies
94 | run: pnpm install --frozen-lockfile
95 |
96 | - name: Run tests
97 | run: pnpm test:coverage
98 |
99 | - name: Upload results to Codecov
100 | uses: codecov/codecov-action@v5
101 | with:
102 | token: ${{ secrets.CODECOV_TOKEN }}
103 | directory: coverage
104 |
--------------------------------------------------------------------------------
/.github/workflows/publish.yml:
--------------------------------------------------------------------------------
1 | name: Publish to npm
2 | on:
3 | push:
4 | tags:
5 | - 'v*.*.*'
6 |
7 | jobs:
8 | publish:
9 | runs-on: ubuntu-latest
10 |
11 | permissions:
12 | contents: read
13 | id-token: write
14 |
15 | steps:
16 | - name: Checkout
17 | uses: actions/checkout@v4
18 |
19 | - name: Set up Node
20 | uses: actions/setup-node@v4
21 | with:
22 | node-version: 20
23 | registry-url: 'https://registry.npmjs.org/'
24 |
25 | - uses: pnpm/action-setup@v4
26 | with:
27 | version: 10
28 |
29 | - name: Install dependencies
30 | run: pnpm install --frozen-lockfile
31 |
32 | - name: Build
33 | run: pnpm run build
34 |
35 | - name: Publish
36 | run: pnpm publish --provenance --access public --no-git-checks
37 | env:
38 | NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
39 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Logs
2 | *.log
3 | npm-debug.log*
4 | yarn-debug.log*
5 | yarn-error.log*
6 | lerna-debug.log*
7 | .pnpm-debug.log*
8 |
9 | # Diagnostic reports (https://nodejs.org/api/report.html)
10 | report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
11 |
12 | # Runtime data
13 | pids
14 | *.pid
15 | *.seed
16 | *.pid.lock
17 |
18 | # Directory for instrumented libs generated by jscoverage/JSCover
19 | lib-cov
20 |
21 | # Coverage directory used by tools like istanbul
22 | coverage
23 | *.lcov
24 |
25 | # nyc test coverage
26 | .nyc_output
27 |
28 | # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
29 | .grunt
30 |
31 | # Bower dependency directory (https://bower.io/)
32 | bower_components
33 |
34 | # node-waf configuration
35 | .lock-wscript
36 |
37 | # Compiled binary addons (https://nodejs.org/api/addons.html)
38 | build/
39 |
40 | # Dependency directories
41 | node_modules/
42 | jspm_packages/
43 |
44 | # Snowpack dependency directory (https://snowpack.dev/)
45 | web_modules/
46 |
47 | # TypeScript cache
48 | *.tsbuildinfo
49 |
50 | # Optional npm cache directory
51 | .npm
52 |
53 | # Optional eslint cache
54 | .eslintcache
55 |
56 | # Optional stylelint cache
57 | .stylelintcache
58 |
59 | # Microbundle cache
60 | .rpt2_cache/
61 | .rts2_cache_cjs/
62 | .rts2_cache_es/
63 | .rts2_cache_umd/
64 |
65 | # Optional REPL history
66 | .node_repl_history
67 |
68 | # Output of 'npm pack'
69 | *.tgz
70 |
71 | # Yarn Integrity file
72 | .yarn-integrity
73 |
74 | # dotenv environment variable files
75 | .env
76 | .env.development.local
77 | .env.test.local
78 | .env.production.local
79 | .env.local
80 |
81 | # parcel-bundler cache (https://parceljs.org/)
82 | .cache
83 | .parcel-cache
84 |
85 | # Next.js build output
86 | .next
87 | out
88 |
89 | # Nuxt.js build / generate output
90 | .nuxt
91 | dist
92 |
93 | # Gatsby files
94 | .cache/
95 | # Comment in the public line in if your project uses Gatsby and not Next.js
96 | # https://nextjs.org/blog/next-9-1#public-directory-support
97 | # public
98 |
99 | # vuepress build output
100 | .vuepress/dist
101 |
102 | # vuepress v2.x temp and cache directory
103 | .temp
104 | .cache
105 |
106 | # Docusaurus cache and generated files
107 | .docusaurus
108 |
109 | # Serverless directories
110 | .serverless/
111 |
112 | # FuseBox cache
113 | .fusebox/
114 |
115 | # DynamoDB Local files
116 | .dynamodb/
117 |
118 | # TernJS port file
119 | .tern-port
120 |
121 | # Stores VSCode versions used for testing VSCode extensions
122 | .vscode-test
123 |
124 | # yarn v2
125 | .yarn/cache
126 | .yarn/unplugged
127 | .yarn/build-state.yml
128 | .yarn/install-state.gz
129 | .pnp.*
130 |
--------------------------------------------------------------------------------
/.husky/pre-commit:
--------------------------------------------------------------------------------
1 | npx lint-staged
2 |
--------------------------------------------------------------------------------
/.prettierignore:
--------------------------------------------------------------------------------
1 | pnpm-lock.yaml
2 |
--------------------------------------------------------------------------------
/.prettierrc:
--------------------------------------------------------------------------------
1 | {
2 | "singleQuote": true,
3 | "semi": false,
4 | "useTabs": false,
5 | "trailingComma": "all",
6 | "printWidth": 80
7 | }
8 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | # Generated by https://smithery.ai. See: https://smithery.ai/docs/config#dockerfile
2 | FROM node:22.12-alpine AS builder
3 |
4 | # Install pnpm globally
5 | RUN npm install -g pnpm@10
6 |
7 | WORKDIR /app
8 |
9 | # Copy package files and install dependencies
10 | COPY package.json pnpm-lock.yaml ./
11 | RUN pnpm install --frozen-lockfile --ignore-scripts
12 |
13 | # Copy the rest of the files
14 | COPY . .
15 |
16 | # Build the project
17 | RUN pnpm build
18 |
19 | FROM node:22.12-alpine AS installer
20 |
21 | # Install pnpm globally
22 | RUN npm install -g pnpm@10
23 |
24 | WORKDIR /app
25 |
26 | # Copy package files and install only production dependencies
27 | COPY package.json pnpm-lock.yaml ./
28 | RUN pnpm install --frozen-lockfile --ignore-scripts --prod
29 |
30 | FROM node:22.12-alpine AS release
31 |
32 | WORKDIR /app
33 |
34 | COPY --from=builder /app/build /app/build
35 | COPY --from=installer /app/node_modules /app/node_modules
36 |
37 | # Expose port if needed (Not explicitly mentioned, MCP runs via stdio, so not needed)
38 |
39 | CMD ["node", "build/index.js"]
40 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Datadog MCP Server
2 |
3 | > **DISCLAIMER**: This is a community-maintained project and is not officially affiliated with, endorsed by, or supported by Datadog, Inc. This MCP server utilizes the Datadog API but is developed independently as part of the [Model Context Protocol](https://github.com/modelcontextprotocol/servers) ecosystem.
4 |
5 | [](https://codecov.io/gh/winor30/mcp-server-datadog)[](https://smithery.ai/server/@winor30/mcp-server-datadog)
6 |
7 | MCP server for the Datadog API, enabling incident management and more.
8 |
9 |
10 |
11 |
12 |
13 | ## Features
14 |
15 | - **Observability Tools**: Provides a mechanism to leverage key Datadog monitoring features, such as incidents, monitors, logs, dashboards, and metrics, through the MCP server.
16 | - **Extensible Design**: Designed to easily integrate with additional Datadog APIs, allowing for seamless future feature expansion.
17 |
18 | ## Tools
19 |
20 | 1. `list_incidents`
21 |
22 | - Retrieve a list of incidents from Datadog.
23 | - **Inputs**:
24 | - `filter` (optional string): Filter parameters for incidents (e.g., status, priority).
25 | - `pagination` (optional object): Pagination details like page size/offset.
26 | - **Returns**: Array of Datadog incidents and associated metadata.
27 |
28 | 2. `get_incident`
29 |
30 | - Retrieve detailed information about a specific Datadog incident.
31 | - **Inputs**:
32 | - `incident_id` (string): Incident ID to fetch details for.
33 | - **Returns**: Detailed incident information (title, status, timestamps, etc.).
34 |
35 | 3. `get_monitors`
36 |
37 | - Fetch the status of Datadog monitors.
38 | - **Inputs**:
39 | - `groupStates` (optional array): States to filter (e.g., alert, warn, no data, ok).
40 | - `name` (optional string): Filter by name.
41 | - `tags` (optional array): Filter by tags.
42 | - **Returns**: Monitors data and a summary of their statuses.
43 |
44 | 4. `get_logs`
45 |
46 | - Search and retrieve logs from Datadog.
47 | - **Inputs**:
48 | - `query` (string): Datadog logs query string.
49 | - `from` (number): Start time in epoch seconds.
50 | - `to` (number): End time in epoch seconds.
51 | - `limit` (optional number): Maximum number of logs to return (defaults to 100).
52 | - **Returns**: Array of matching logs.
53 |
54 | 5. `list_dashboards`
55 |
56 | - Get a list of dashboards from Datadog.
57 | - **Inputs**:
58 | - `name` (optional string): Filter dashboards by name.
59 | - `tags` (optional array): Filter dashboards by tags.
60 | - **Returns**: Array of dashboards with URL references.
61 |
62 | 6. `get_dashboard`
63 |
64 | - Retrieve a specific dashboard from Datadog.
65 | - **Inputs**:
66 | - `dashboard_id` (string): ID of the dashboard to fetch.
67 | - **Returns**: Dashboard details including title, widgets, etc.
68 |
69 | 7. `query_metrics`
70 |
71 | - Retrieve metrics data from Datadog.
72 | - **Inputs**:
73 | - `query` (string): Metrics query string.
74 | - `from` (number): Start time in epoch seconds.
75 | - `to` (number): End time in epoch seconds.
76 | - **Returns**: Metrics data for the queried timeframe.
77 |
78 | 8. `list_traces`
79 |
80 | - Retrieve a list of APM traces from Datadog.
81 | - **Inputs**:
82 | - `query` (string): Datadog APM trace query string.
83 | - `from` (number): Start time in epoch seconds.
84 | - `to` (number): End time in epoch seconds.
85 | - `limit` (optional number): Maximum number of traces to return (defaults to 100).
86 | - `sort` (optional string): Sort order for traces (defaults to '-timestamp').
87 | - `service` (optional string): Filter by service name.
88 | - `operation` (optional string): Filter by operation name.
89 | - **Returns**: Array of matching traces from Datadog APM.
90 |
91 | 9. `list_hosts`
92 |
93 | - Get list of hosts from Datadog.
94 | - **Inputs**:
95 | - `filter` (optional string): Filter string for search results.
96 | - `sort_field` (optional string): Field to sort hosts by.
97 | - `sort_dir` (optional string): Sort direction (asc/desc).
98 | - `start` (optional number): Starting offset for pagination.
99 | - `count` (optional number): Max number of hosts to return (max: 1000).
100 | - `from` (optional number): Search hosts from this UNIX timestamp.
101 | - `include_muted_hosts_data` (optional boolean): Include muted hosts status and expiry.
102 | - `include_hosts_metadata` (optional boolean): Include host metadata (version, platform, etc).
103 | - **Returns**: Array of hosts with details including name, ID, aliases, apps, mute status, and more.
104 |
105 | 10. `get_active_hosts_count`
106 |
107 | - Get the total number of active hosts in Datadog.
108 | - **Inputs**:
109 | - `from` (optional number): Number of seconds from which you want to get total number of active hosts (defaults to 2h).
110 | - **Returns**: Count of total active and up hosts.
111 |
112 | 11. `mute_host`
113 |
114 | - Mute a host in Datadog.
115 | - **Inputs**:
116 | - `hostname` (string): The name of the host to mute.
117 | - `message` (optional string): Message to associate with the muting of this host.
118 | - `end` (optional number): POSIX timestamp for when the mute should end.
119 | - `override` (optional boolean): If true and the host is already muted, replaces existing end time.
120 | - **Returns**: Success status and confirmation message.
121 |
122 | 12. `unmute_host`
123 |
124 | - Unmute a host in Datadog.
125 | - **Inputs**:
126 | - `hostname` (string): The name of the host to unmute.
127 | - **Returns**: Success status and confirmation message.
128 |
129 | 13. `list_downtimes`
130 |
131 | - List scheduled downtimes from Datadog.
132 | - **Inputs**:
133 | - `currentOnly` (optional boolean): Return only currently active downtimes when true.
134 | - `monitorId` (optional number): Filter by monitor ID.
135 | - **Returns**: Array of scheduled downtimes with details including scope, monitor information, and schedule.
136 |
137 | 14. `schedule_downtime`
138 |
139 | - Schedule a downtime in Datadog.
140 | - **Inputs**:
141 | - `scope` (string): Scope to apply downtime to (e.g. 'host:my-host').
142 | - `start` (optional number): UNIX timestamp for the start of the downtime.
143 | - `end` (optional number): UNIX timestamp for the end of the downtime.
144 | - `message` (optional string): A message to include with the downtime.
145 | - `timezone` (optional string): The timezone for the downtime (e.g. 'UTC', 'America/New_York').
146 | - `monitorId` (optional number): The ID of the monitor to mute.
147 | - `monitorTags` (optional array): A list of monitor tags for filtering.
148 | - `recurrence` (optional object): Recurrence settings for the downtime.
149 | - `type` (string): Recurrence type ('days', 'weeks', 'months', 'years').
150 | - `period` (number): How often to repeat (must be >= 1).
151 | - `weekDays` (optional array): Days of the week for weekly recurrence.
152 | - `until` (optional number): UNIX timestamp for when the recurrence ends.
153 | - **Returns**: Scheduled downtime details including ID and active status.
154 |
155 | 15. `cancel_downtime`
156 |
157 | - Cancel a scheduled downtime in Datadog.
158 | - **Inputs**:
159 | - `downtimeId` (number): The ID of the downtime to cancel.
160 | - **Returns**: Confirmation of downtime cancellation.
161 |
162 | 16. `get_rum_applications`
163 |
164 | - Get all RUM applications in the organization.
165 | - **Inputs**: None.
166 | - **Returns**: List of RUM applications.
167 |
168 | 17. `get_rum_events`
169 |
170 | - Search and retrieve RUM events from Datadog.
171 | - **Inputs**:
172 | - `query` (string): Datadog RUM query string.
173 | - `from` (number): Start time in epoch seconds.
174 | - `to` (number): End time in epoch seconds.
175 | - `limit` (optional number): Maximum number of events to return (default: 100).
176 | - **Returns**: Array of RUM events.
177 |
178 | 18. `get_rum_grouped_event_count`
179 |
180 | - Search, group and count RUM events by a specified dimension.
181 | - **Inputs**:
182 | - `query` (optional string): Additional query filter for RUM search (default: "\*").
183 | - `from` (number): Start time in epoch seconds.
184 | - `to` (number): End time in epoch seconds.
185 | - `groupBy` (optional string): Dimension to group results by (default: "application.name").
186 | - **Returns**: Grouped event counts.
187 |
188 | 19. `get_rum_page_performance`
189 |
190 | - Get page (view) performance metrics from RUM data.
191 | - **Inputs**:
192 | - `query` (optional string): Additional query filter for RUM search (default: "\*").
193 | - `from` (number): Start time in epoch seconds.
194 | - `to` (number): End time in epoch seconds.
195 | - `metricNames` (array of strings): Array of metric names to retrieve (e.g., 'view.load_time', 'view.first_contentful_paint').
196 | - **Returns**: Performance metrics including average, min, max, and count for each metric.
197 |
198 | 20. `get_rum_page_waterfall`
199 |
200 | - Retrieve RUM page (view) waterfall data filtered by application name and session ID.
201 | - **Inputs**:
202 | - `applicationName` (string): Application name to filter events.
203 | - `sessionId` (string): Session ID to filter events.
204 | - **Returns**: Waterfall data for the specified application and session.
205 |
206 | ## Setup
207 |
208 | ### Datadog Credentials
209 |
210 | You need valid Datadog API credentials to use this MCP server:
211 |
212 | - `DATADOG_API_KEY`: Your Datadog API key
213 | - `DATADOG_APP_KEY`: Your Datadog Application key
214 | - `DATADOG_SITE` (optional): The Datadog site (e.g. `datadoghq.eu`)
215 |
216 | Export them in your environment before running the server:
217 |
218 | ```bash
219 | export DATADOG_API_KEY="your_api_key"
220 | export DATADOG_APP_KEY="your_app_key"
221 | export DATADOG_SITE="your_datadog_site"
222 | ```
223 |
224 | ## Installation
225 |
226 | ### Installing via Smithery
227 |
228 | To install Datadog MCP Server for Claude Desktop automatically via [Smithery](https://smithery.ai/server/@winor30/mcp-server-datadog):
229 |
230 | ```bash
231 | npx -y @smithery/cli install @winor30/mcp-server-datadog --client claude
232 | ```
233 |
234 | ### Manual Installation
235 |
236 | ```bash
237 | pnpm install
238 | pnpm build
239 | pnpm watch # for development with auto-rebuild
240 | ```
241 |
242 | ## Usage with Claude Desktop
243 |
244 | To use this with Claude Desktop, add an entry for this server to your `claude_desktop_config.json`. The first snippet below shows the general shape of an MCP server entry (using the GitHub server as an example); the second shows the Datadog-specific configuration:
245 |
246 | On MacOS: `~/Library/Application Support/Claude/claude_desktop_config.json`
247 | On Windows: `%APPDATA%/Claude/claude_desktop_config.json`
248 |
249 | ```json
250 | {
251 | "mcpServers": {
252 | "github": {
253 | "command": "npx",
254 | "args": ["-y", "@modelcontextprotocol/server-github"],
255 | "env": {
256 | "GITHUB_PERSONAL_ACCESS_TOKEN": ""
257 | }
258 | }
259 | }
260 | }
261 | ```
262 |
263 | ```json
264 | {
265 | "mcpServers": {
266 | "datadog": {
267 | "command": "/path/to/mcp-server-datadog/build/index.js",
268 | "env": {
269 | "DATADOG_API_KEY": "",
270 | "DATADOG_APP_KEY": "",
271 | "DATADOG_SITE": "" // Optional
272 | }
273 | }
274 | }
275 | }
276 | ```
277 |
278 | Or specify via `npx`:
279 |
280 | ```json
281 | {
282 | "mcpServers": {
283 | "mcp-server-datadog": {
284 | "command": "npx",
285 | "args": ["-y", "@winor30/mcp-server-datadog"],
286 | "env": {
287 | "DATADOG_API_KEY": "",
288 | "DATADOG_APP_KEY": "",
289 | "DATADOG_SITE": "" // Optional
290 | }
291 | }
292 | }
293 | }
294 | ```
295 |
296 | ## Debugging
297 |
298 | Because MCP servers communicate over standard input/output, debugging can sometimes be tricky. We recommend using the [MCP Inspector](https://github.com/modelcontextprotocol/inspector). You can run the inspector with:
299 |
300 | ```bash
pnpm inspector
302 | ```
303 |
304 | The inspector will provide a URL you can open in your browser to see logs and send requests manually.
305 |
306 | ## Contributing
307 |
308 | Contributions are welcome! Feel free to open an issue or a pull request if you have any suggestions, bug reports, or improvements to propose.
309 |
310 | ## License
311 |
312 | This project is licensed under the [Apache License, Version 2.0](./LICENSE).
313 |
--------------------------------------------------------------------------------
/eslint.config.js:
--------------------------------------------------------------------------------
import globals from 'globals'
import pluginJs from '@eslint/js'
import tseslint from 'typescript-eslint'

/**
 * Flat ESLint configuration for the repo (JS + TS sources).
 * @type {import('eslint').Linter.Config[]}
 */
export default [
  { files: ['**/*.{js,mjs,cjs,ts}'] },
  { ignores: ['node_modules/**', 'build/**'] },
  // Bug fix: this is a Node.js MCP stdio server (src/index.ts reads
  // `process.env`), not browser code — browser globals would leave `process`
  // et al. flagged by no-undef while whitelisting `window`/`document`.
  { languageOptions: { globals: globals.node } },
  pluginJs.configs.recommended,
  ...tseslint.configs.recommended,
]
13 |
--------------------------------------------------------------------------------
/jest.config.ts:
--------------------------------------------------------------------------------
1 | /** @type {import('jest').Config} */
2 | module.exports = {
3 | preset: 'ts-jest',
4 | testEnvironment: 'node',
5 | testMatch: ['**/__tests__/**/*.ts', '**/?(*.)+(spec|test).ts'],
6 | }
7 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "@winor30/mcp-server-datadog",
3 | "version": "1.6.0",
4 | "description": "MCP server for interacting with Datadog API",
5 | "repository": {
6 | "type": "git",
7 | "url": "https://github.com/winor30/mcp-server-datadog.git"
8 | },
9 | "type": "module",
10 | "bin": {
11 | "mcp-server-datadog": "./build/index.js"
12 | },
13 | "main": "build/index.js",
14 | "module": "build/index.js",
15 | "types": "build/index.d.ts",
16 | "files": [
17 | "build",
18 | "README.md"
19 | ],
20 | "access": "public",
21 | "publishConfig": {
22 | "registry": "https://registry.npmjs.org",
23 | "access": "public"
24 | },
25 | "scripts": {
26 | "build": "tsup && node -e \"require('fs').chmodSync('build/index.js', '755')\"",
27 | "prepare": "husky",
28 | "watch": "tsup --watch",
29 | "inspector": "npx @modelcontextprotocol/inspector build/index.js",
    "lint": "eslint . --fix",
31 | "format": "prettier --write .",
32 | "test": "vitest run",
33 | "test:coverage": "vitest run --coverage",
34 | "test:watch": "vitest",
35 | "lint-staged": "lint-staged"
36 | },
37 | "dependencies": {
38 | "@datadog/datadog-api-client": "^1.34.1",
39 | "@modelcontextprotocol/sdk": "0.6.0",
40 | "zod": "^3.24.3",
41 | "zod-to-json-schema": "^3.24.5"
42 | },
43 | "devDependencies": {
44 | "@eslint/eslintrc": "^3.3.1",
45 | "@eslint/js": "^9.25.0",
46 | "@types/jest": "^29.5.14",
47 | "@types/node": "^20.17.30",
48 | "@vitest/coverage-v8": "3.0.8",
49 | "eslint": "^9.25.0",
50 | "globals": "^16.0.0",
51 | "husky": "^9.1.7",
52 | "jest": "^29.7.0",
53 | "msw": "^2.7.5",
54 | "prettier": "^3.5.3",
55 | "ts-jest": "^29.3.2",
56 | "ts-node": "^10.9.2",
57 | "tsup": "^8.4.0",
58 | "typescript": "^5.8.3",
59 | "typescript-eslint": "^8.30.1",
60 | "vitest": "^3.1.4"
61 | },
62 | "engines": {
63 | "node": ">=20.x",
64 | "pnpm": ">=10"
65 | },
66 | "pnpm": {
67 | "overrides": {
68 | "vite": ">=6.3.4"
69 | }
70 | },
71 | "lint-staged": {
72 | "*.{js,ts}": [
73 | "eslint --fix",
74 | "prettier --write"
75 | ],
76 | "*.{json,md}": [
77 | "prettier --write"
78 | ]
79 | }
80 | }
81 |
--------------------------------------------------------------------------------
/pnpm-workspace.yaml:
--------------------------------------------------------------------------------
1 | packages:
2 | - .
3 |
4 | onlyBuiltDependencies:
5 | - esbuild
6 | - msw
7 |
--------------------------------------------------------------------------------
/smithery.yaml:
--------------------------------------------------------------------------------
1 | # Smithery configuration file: https://smithery.ai/docs/config#smitheryyaml
2 |
3 | startCommand:
4 | type: stdio
5 | configSchema:
6 | # JSON Schema defining the configuration options for the MCP.
7 | type: object
8 | required:
9 | - datadogApiKey
10 | - datadogAppKey
11 | properties:
12 | datadogApiKey:
13 | type: string
14 | description: Your Datadog API key
15 | datadogAppKey:
16 | type: string
17 | description: Your Datadog Application key
18 | datadogSite:
19 | type: string
20 | default: ''
21 | description: Optional Datadog site (e.g. datadoghq.eu)
  # A JS function that produces the CLI command based on the given config to start the MCP on stdio.
  commandFunction: |-
25 | (config) => ({
26 | command: 'node',
27 | args: ['build/index.js'],
28 | env: Object.assign({}, process.env, {
29 | DATADOG_API_KEY: config.datadogApiKey,
30 | DATADOG_APP_KEY: config.datadogAppKey,
31 | ...(config.datadogSite && { DATADOG_SITE: config.datadogSite })
32 | })
33 | })
34 | exampleConfig:
35 | datadogApiKey: your_datadog_api_key_here
36 | datadogAppKey: your_datadog_app_key_here
37 | datadogSite: datadoghq.com
38 |
--------------------------------------------------------------------------------
/src/index.ts:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env node
2 |
3 | /**
4 | * This script sets up the mcp-server-datadog.
5 | * It initializes an MCP server that integrates with Datadog for incident management.
6 | * By leveraging MCP, this server can list and retrieve incidents via the Datadog incident API.
7 | * With a design built for scalability, future integrations with additional Datadog APIs are anticipated.
8 | */
9 |
10 | import { Server } from '@modelcontextprotocol/sdk/server/index.js'
11 | import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js'
12 | import {
13 | CallToolRequestSchema,
14 | ListToolsRequestSchema,
15 | } from '@modelcontextprotocol/sdk/types.js'
16 | import { log, mcpDatadogVersion } from './utils/helper'
17 | import { INCIDENT_TOOLS, createIncidentToolHandlers } from './tools/incident'
18 | import { METRICS_TOOLS, createMetricsToolHandlers } from './tools/metrics'
19 | import { LOGS_TOOLS, createLogsToolHandlers } from './tools/logs'
20 | import { MONITORS_TOOLS, createMonitorsToolHandlers } from './tools/monitors'
21 | import {
22 | DASHBOARDS_TOOLS,
23 | createDashboardsToolHandlers,
24 | } from './tools/dashboards'
25 | import { TRACES_TOOLS, createTracesToolHandlers } from './tools/traces'
26 | import { HOSTS_TOOLS, createHostsToolHandlers } from './tools/hosts'
27 | import { ToolHandlers } from './utils/types'
28 | import { createDatadogConfig } from './utils/datadog'
29 | import { createDowntimesToolHandlers, DOWNTIMES_TOOLS } from './tools/downtimes'
30 | import { createRumToolHandlers, RUM_TOOLS } from './tools/rum'
31 | import { v2, v1 } from '@datadog/datadog-api-client'
32 |
// Server identity and capabilities: this server only exposes tools
// (no resources or prompts).
const server = new Server(
  {
    name: 'Datadog MCP Server',
    version: mcpDatadogVersion,
  },
  {
    capabilities: {
      tools: {},
    },
  },
)

// Route transport/protocol-level errors through the shared logger.
server.onerror = (error) => {
  log('error', `Server error: ${error.message}`, error.stack)
}

/**
 * Handler that retrieves the list of available tools in the mcp-server-datadog.
 * Aggregates every tool family (incidents, metrics, logs, monitors, dashboards,
 * traces, hosts, downtimes, RUM) into a single flat list for the client.
 */
server.setRequestHandler(ListToolsRequestSchema, async () => {
  return {
    tools: [
      ...INCIDENT_TOOLS,
      ...METRICS_TOOLS,
      ...LOGS_TOOLS,
      ...MONITORS_TOOLS,
      ...DASHBOARDS_TOOLS,
      ...TRACES_TOOLS,
      ...HOSTS_TOOLS,
      ...DOWNTIMES_TOOLS,
      ...RUM_TOOLS,
    ],
  }
})

// Fail fast at startup when required Datadog credentials are absent.
if (!process.env.DATADOG_API_KEY || !process.env.DATADOG_APP_KEY) {
  throw new Error('DATADOG_API_KEY and DATADOG_APP_KEY must be set')
}

// Shared client configuration. DATADOG_SITE is optional;
// NOTE(review): default-site behavior lives in createDatadogConfig — confirm there.
const datadogConfig = createDatadogConfig({
  apiKeyAuth: process.env.DATADOG_API_KEY,
  appKeyAuth: process.env.DATADOG_APP_KEY,
  site: process.env.DATADOG_SITE,
})

// One Datadog API client per tool family, all sharing the same configuration.
// Spreading the handler maps assumes tool names are globally unique.
const TOOL_HANDLERS: ToolHandlers = {
  ...createIncidentToolHandlers(new v2.IncidentsApi(datadogConfig)),
  ...createMetricsToolHandlers(new v1.MetricsApi(datadogConfig)),
  ...createLogsToolHandlers(new v2.LogsApi(datadogConfig)),
  ...createMonitorsToolHandlers(new v1.MonitorsApi(datadogConfig)),
  ...createDashboardsToolHandlers(new v1.DashboardsApi(datadogConfig)),
  ...createTracesToolHandlers(new v2.SpansApi(datadogConfig)),
  ...createHostsToolHandlers(new v1.HostsApi(datadogConfig)),
  ...createDowntimesToolHandlers(new v1.DowntimesApi(datadogConfig)),
  ...createRumToolHandlers(new v2.RUMApi(datadogConfig)),
}
90 | /**
91 | * Handler for invoking Datadog-related tools in the mcp-server-datadog.
92 | * The TOOL_HANDLERS object contains various tools that interact with different Datadog APIs.
93 | * By specifying the tool name in the request, the LLM can select and utilize the required tool.
94 | */
95 | server.setRequestHandler(CallToolRequestSchema, async (request) => {
96 | try {
97 | if (TOOL_HANDLERS[request.params.name]) {
98 | return await TOOL_HANDLERS[request.params.name](request)
99 | }
100 | throw new Error('Unknown tool')
101 | } catch (unknownError) {
102 | const error =
103 | unknownError instanceof Error
104 | ? unknownError
105 | : new Error(String(unknownError))
106 | log(
107 | 'error',
108 | `Request: ${request.params.name}, ${JSON.stringify(request.params.arguments)} failed`,
109 | error.message,
110 | error.stack,
111 | )
112 | throw error
113 | }
114 | })
115 |
116 | /**
117 | * Initializes and starts the mcp-server-datadog using stdio transport,
118 | * which sends and receives data through standard input and output.
119 | */
120 | async function main() {
121 | const transport = new StdioServerTransport()
122 | await server.connect(transport)
123 | }
124 |
125 | main().catch((error) => {
126 | log('error', 'Server error:', error)
127 | process.exit(1)
128 | })
129 |
--------------------------------------------------------------------------------
/src/tools/dashboards/index.ts:
--------------------------------------------------------------------------------
1 | export { DASHBOARDS_TOOLS, createDashboardsToolHandlers } from './tool'
2 |
--------------------------------------------------------------------------------
/src/tools/dashboards/schema.ts:
--------------------------------------------------------------------------------
1 | import { z } from 'zod'
2 |
3 | export const ListDashboardsZodSchema = z.object({
4 | name: z.string().optional().describe('Filter dashboards by name'),
5 | tags: z.array(z.string()).optional().describe('Filter dashboards by tags'),
6 | })
7 |
8 | export const GetDashboardZodSchema = z.object({
9 | dashboardId: z.string(),
10 | })
11 |
--------------------------------------------------------------------------------
/src/tools/dashboards/tool.ts:
--------------------------------------------------------------------------------
1 | import { ExtendedTool, ToolHandlers } from '../../utils/types'
2 | import { v1 } from '@datadog/datadog-api-client'
3 | import { createToolSchema } from '../../utils/tool'
4 | import { GetDashboardZodSchema, ListDashboardsZodSchema } from './schema'
5 |
/** Names of the dashboard tools exposed by this module. */
type DashboardsToolName = 'list_dashboards' | 'get_dashboard'
/** Tool type for dashboard operations. */
type DashboardsTool = ExtendedTool

/**
 * Tool schemas advertised to MCP clients for Datadog dashboard operations.
 * Input validation is derived from the Zod schemas in ./schema.
 */
export const DASHBOARDS_TOOLS: DashboardsTool[] = [
  createToolSchema(
    ListDashboardsZodSchema,
    'list_dashboards',
    'Get list of dashboards from Datadog',
  ),
  createToolSchema(
    GetDashboardZodSchema,
    'get_dashboard',
    'Get a dashboard from Datadog',
  ),
] as const
21 |
22 | type DashboardsToolHandlers = ToolHandlers
23 |
24 | export const createDashboardsToolHandlers = (
25 | apiInstance: v1.DashboardsApi,
26 | ): DashboardsToolHandlers => {
27 | return {
28 | list_dashboards: async (request) => {
29 | const { name, tags } = ListDashboardsZodSchema.parse(
30 | request.params.arguments,
31 | )
32 |
33 | const response = await apiInstance.listDashboards({
34 | filterShared: false,
35 | })
36 |
37 | if (!response.dashboards) {
38 | throw new Error('No dashboards data returned')
39 | }
40 |
41 | // Filter dashboards based on name and tags if provided
42 | let filteredDashboards = response.dashboards
43 | if (name) {
44 | const searchTerm = name.toLowerCase()
45 | filteredDashboards = filteredDashboards.filter((dashboard) =>
46 | dashboard.title?.toLowerCase().includes(searchTerm),
47 | )
48 | }
49 | if (tags && tags.length > 0) {
50 | filteredDashboards = filteredDashboards.filter((dashboard) => {
51 | const dashboardTags = dashboard.description?.split(',') || []
52 | return tags.every((tag) => dashboardTags.includes(tag))
53 | })
54 | }
55 |
56 | const dashboards = filteredDashboards.map((dashboard) => ({
57 | ...dashboard,
58 | url: `https://app.datadoghq.com/dashboard/${dashboard.id}`,
59 | }))
60 |
61 | return {
62 | content: [
63 | {
64 | type: 'text',
65 | text: `Dashboards: ${JSON.stringify(dashboards)}`,
66 | },
67 | ],
68 | }
69 | },
70 | get_dashboard: async (request) => {
71 | const { dashboardId } = GetDashboardZodSchema.parse(
72 | request.params.arguments,
73 | )
74 |
75 | const response = await apiInstance.getDashboard({
76 | dashboardId,
77 | })
78 |
79 | return {
80 | content: [
81 | {
82 | type: 'text',
83 | text: `Dashboard: ${JSON.stringify(response)}`,
84 | },
85 | ],
86 | }
87 | },
88 | }
89 | }
90 |
--------------------------------------------------------------------------------
/src/tools/downtimes/index.ts:
--------------------------------------------------------------------------------
1 | export { DOWNTIMES_TOOLS, createDowntimesToolHandlers } from './tool'
2 |
--------------------------------------------------------------------------------
/src/tools/downtimes/schema.ts:
--------------------------------------------------------------------------------
1 | import { z } from 'zod'
2 |
/** Input schema for listing downtimes. */
export const ListDowntimesZodSchema = z.object({
  // When true, restricts results to downtimes active at request time.
  currentOnly: z.boolean().optional(),
})

/**
 * Input schema for scheduling a downtime.
 * `scope` is required; `start`/`end`/`until` are UNIX timestamps in seconds.
 */
export const ScheduleDowntimeZodSchema = z.object({
  scope: z.string().nonempty(), // example: 'host:my-host'
  start: z.number().optional(), // UNIX timestamp
  end: z.number().optional(), // UNIX timestamp
  message: z.string().optional(),
  timezone: z.string().optional(), // example: 'UTC', 'America/New_York'
  monitorId: z.number().optional(),
  monitorTags: z.array(z.string()).optional(),
  // Optional recurrence rule, e.g. every 1 week on Mon/Tue until a timestamp.
  recurrence: z
    .object({
      type: z.enum(['days', 'weeks', 'months', 'years']),
      period: z.number().min(1),
      weekDays: z
        .array(z.enum(['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']))
        .optional(),
      until: z.number().optional(), // UNIX timestamp
    })
    .optional(),
})

/** Input schema for cancelling a downtime by its numeric ID. */
export const CancelDowntimeZodSchema = z.object({
  downtimeId: z.number(),
})
30 |
--------------------------------------------------------------------------------
/src/tools/downtimes/tool.ts:
--------------------------------------------------------------------------------
1 | import { ExtendedTool, ToolHandlers } from '../../utils/types'
2 | import { v1 } from '@datadog/datadog-api-client'
3 | import { createToolSchema } from '../../utils/tool'
4 | import {
5 | ListDowntimesZodSchema,
6 | ScheduleDowntimeZodSchema,
7 | CancelDowntimeZodSchema,
8 | } from './schema'
9 |
/** Names of the downtime tools exposed by this module. */
type DowntimesToolName =
  | 'list_downtimes'
  | 'schedule_downtime'
  | 'cancel_downtime'
/** Tool type for downtime operations. */
type DowntimesTool = ExtendedTool

/**
 * Tool schemas advertised to MCP clients for Datadog downtime operations.
 * Input validation is derived from the Zod schemas in ./schema.
 */
export const DOWNTIMES_TOOLS: DowntimesTool[] = [
  createToolSchema(
    ListDowntimesZodSchema,
    'list_downtimes',
    'List scheduled downtimes from Datadog',
  ),
  createToolSchema(
    ScheduleDowntimeZodSchema,
    'schedule_downtime',
    'Schedule a downtime in Datadog',
  ),
  createToolSchema(
    CancelDowntimeZodSchema,
    'cancel_downtime',
    'Cancel a scheduled downtime in Datadog',
  ),
] as const

/** Handler map type for the downtime tools. */
type DowntimesToolHandlers = ToolHandlers
35 |
36 | export const createDowntimesToolHandlers = (
37 | apiInstance: v1.DowntimesApi,
38 | ): DowntimesToolHandlers => {
39 | return {
40 | list_downtimes: async (request) => {
41 | const { currentOnly } = ListDowntimesZodSchema.parse(
42 | request.params.arguments,
43 | )
44 |
45 | const res = await apiInstance.listDowntimes({
46 | currentOnly,
47 | })
48 |
49 | return {
50 | content: [
51 | {
52 | type: 'text',
53 | text: `Listed downtimes:\n${JSON.stringify(res, null, 2)}`,
54 | },
55 | ],
56 | }
57 | },
58 |
59 | schedule_downtime: async (request) => {
60 | const params = ScheduleDowntimeZodSchema.parse(request.params.arguments)
61 |
62 | // Convert to the format expected by Datadog client
63 | const downtimeData: v1.Downtime = {
64 | scope: [params.scope],
65 | start: params.start,
66 | end: params.end,
67 | message: params.message,
68 | timezone: params.timezone,
69 | monitorId: params.monitorId,
70 | monitorTags: params.monitorTags,
71 | }
72 |
73 | // Add recurrence configuration if provided
74 | if (params.recurrence) {
75 | downtimeData.recurrence = {
76 | type: params.recurrence.type,
77 | period: params.recurrence.period,
78 | weekDays: params.recurrence.weekDays,
79 | }
80 | }
81 |
82 | const res = await apiInstance.createDowntime({
83 | body: downtimeData,
84 | })
85 |
86 | return {
87 | content: [
88 | {
89 | type: 'text',
90 | text: `Scheduled downtime: ${JSON.stringify(res, null, 2)}`,
91 | },
92 | ],
93 | }
94 | },
95 |
96 | cancel_downtime: async (request) => {
97 | const { downtimeId } = CancelDowntimeZodSchema.parse(
98 | request.params.arguments,
99 | )
100 |
101 | await apiInstance.cancelDowntime({
102 | downtimeId,
103 | })
104 |
105 | return {
106 | content: [
107 | {
108 | type: 'text',
109 | text: `Cancelled downtime with ID: ${downtimeId}`,
110 | },
111 | ],
112 | }
113 | },
114 | }
115 | }
116 |
--------------------------------------------------------------------------------
/src/tools/hosts/index.ts:
--------------------------------------------------------------------------------
1 | /**
2 | * Central export file for the Datadog Hosts management tools.
3 | * Re-exports the tools and their handlers from the implementation file.
4 | *
5 | * HOSTS_TOOLS: Array of tool schemas defining the available host management operations
6 | * createHostsToolHandlers: Function that creates host management operation handlers
7 | */
8 | export { HOSTS_TOOLS, createHostsToolHandlers } from './tool'
9 |
--------------------------------------------------------------------------------
/src/tools/hosts/schema.ts:
--------------------------------------------------------------------------------
1 | import { z } from 'zod'
2 |
3 | /**
4 | * Zod schemas for validating input parameters for Datadog host management operations.
5 | * These schemas define the expected shape and types of data for each host-related tool.
6 | */
7 |
8 | /**
9 | * Schema for muting a host in Datadog.
10 | * Defines required and optional parameters for temporarily silencing a host's alerts.
11 | *
12 | * @param hostname - Required. Identifies the host to be muted
13 | * @param message - Optional. Adds context about why the host is being muted
14 | * @param end - Optional. Unix timestamp defining when the mute should automatically expire
15 | * @param override - Optional. Controls whether to replace an existing mute's end time
16 | */
17 | export const MuteHostZodSchema = z.object({
18 | hostname: z.string().describe('The name of the host to mute'),
19 | message: z
20 | .string()
21 | .optional()
22 | .describe('Message to associate with the muting of this host'),
23 | end: z
24 | .number()
25 | .int()
26 | .optional()
27 | .describe('POSIX timestamp for when the mute should end'),
28 | override: z
29 | .boolean()
30 | .optional()
31 | .default(false)
32 | .describe(
33 | 'If true and the host is already muted, replaces existing end time',
34 | ),
35 | })
36 |
37 | /**
38 | * Schema for unmuting a host in Datadog.
39 | * Defines parameters for re-enabling alerts for a previously muted host.
40 | *
41 | * @param hostname - Required. Identifies the host to be unmuted
42 | */
43 | export const UnmuteHostZodSchema = z.object({
44 | hostname: z.string().describe('The name of the host to unmute'),
45 | })
46 |
47 | /**
48 | * Schema for retrieving active host counts from Datadog.
49 | * Defines parameters for querying the number of reporting hosts within a time window.
50 | *
51 | * @param from - Optional. Time window in seconds to check for host activity
52 | * Defaults to 7200 seconds (2 hours)
53 | */
54 | export const GetActiveHostsCountZodSchema = z.object({
55 | from: z
56 | .number()
57 | .int()
58 | .optional()
59 | .default(7200)
60 | .describe(
61 | 'Number of seconds from which you want to get total number of active hosts (defaults to 2h)',
62 | ),
63 | })
64 |
65 | /**
66 | * Schema for listing and filtering hosts in Datadog.
67 | * Defines comprehensive parameters for querying and filtering host information.
68 | *
69 | * @param filter - Optional. Search string to filter hosts
70 | * @param sort_field - Optional. Field to sort results by
71 | * @param sort_dir - Optional. Sort direction ('asc' or 'desc')
72 | * @param start - Optional. Pagination offset
73 | * @param count - Optional. Number of hosts to return (max 1000)
74 | * @param from - Optional. Unix timestamp to start searching from
75 | * @param include_muted_hosts_data - Optional. Include muting information
76 | * @param include_hosts_metadata - Optional. Include detailed host metadata
77 | */
78 | export const ListHostsZodSchema = z.object({
79 | filter: z.string().optional().describe('Filter string for search results'),
80 | sort_field: z.string().optional().describe('Field to sort hosts by'),
81 | sort_dir: z.string().optional().describe('Sort direction (asc/desc)'),
82 | start: z.number().int().optional().describe('Starting offset for pagination'),
83 | count: z
84 | .number()
85 | .int()
86 | .max(1000)
87 | .optional()
88 | .describe('Max number of hosts to return (max: 1000)'),
89 | from: z
90 | .number()
91 | .int()
92 | .optional()
93 | .describe('Search hosts from this UNIX timestamp'),
94 | include_muted_hosts_data: z
95 | .boolean()
96 | .optional()
97 | .describe('Include muted hosts status and expiry'),
98 | include_hosts_metadata: z
99 | .boolean()
100 | .optional()
101 | .describe('Include host metadata (version, platform, etc)'),
102 | })
103 |
--------------------------------------------------------------------------------
/src/tools/hosts/tool.ts:
--------------------------------------------------------------------------------
1 | import { ExtendedTool, ToolHandlers } from '../../utils/types'
2 | import { v1 } from '@datadog/datadog-api-client'
3 | import { createToolSchema } from '../../utils/tool'
4 | import {
5 | ListHostsZodSchema,
6 | GetActiveHostsCountZodSchema,
7 | MuteHostZodSchema,
8 | UnmuteHostZodSchema,
9 | } from './schema'
10 |
11 | /**
12 | * This module implements Datadog host management tools for muting, unmuting,
13 | * and retrieving host information using the Datadog API client.
14 | */
15 |
16 | /** Available host management tool names */
17 | type HostsToolName =
18 | | 'list_hosts'
19 | | 'get_active_hosts_count'
20 | | 'mute_host'
21 | | 'unmute_host'
22 | /** Extended tool type with host-specific operations */
23 | type HostsTool = ExtendedTool
24 |
25 | /**
26 | * Array of available host management tools.
27 | * Each tool is created with a schema for input validation and includes a description.
28 | */
29 | export const HOSTS_TOOLS: HostsTool[] = [
30 | createToolSchema(MuteHostZodSchema, 'mute_host', 'Mute a host in Datadog'),
31 | createToolSchema(
32 | UnmuteHostZodSchema,
33 | 'unmute_host',
34 | 'Unmute a host in Datadog',
35 | ),
36 | createToolSchema(
37 | ListHostsZodSchema,
38 | 'list_hosts',
39 | 'Get list of hosts from Datadog',
40 | ),
41 | createToolSchema(
42 | GetActiveHostsCountZodSchema,
43 | 'get_active_hosts_count',
44 | 'Get the total number of active hosts in Datadog (defaults to last 5 minutes)',
45 | ),
46 | ] as const
47 |
/** Type definition for host management tool implementations */
type HostsToolHandlers = ToolHandlers

/**
 * Implementation of host management tool handlers.
 * Each handler validates inputs using Zod schemas and interacts with the Datadog API.
 *
 * @param apiInstance - Configured Datadog v1 Hosts API client used by every handler.
 * @returns Map of tool name to async request handler.
 */
export const createHostsToolHandlers = (
  apiInstance: v1.HostsApi,
): HostsToolHandlers => {
  return {
    /**
     * Mutes a specified host in Datadog.
     * Silences alerts and notifications for the host until unmuted or until the specified end time.
     */
    mute_host: async (request) => {
      const { hostname, message, end, override } = MuteHostZodSchema.parse(
        request.params.arguments,
      )

      await apiInstance.muteHost({
        hostName: hostname,
        body: {
          message,
          end,
          override,
        },
      })

      return {
        content: [
          {
            type: 'text',
            // `end` is a POSIX timestamp in seconds, hence the *1000 for Date.
            text: JSON.stringify(
              {
                status: 'success',
                message: `Host ${hostname} has been muted successfully${message ? ` with message: ${message}` : ''}${end ? ` until ${new Date(end * 1000).toISOString()}` : ''}`,
              },
              null,
              2,
            ),
          },
        ],
      }
    },

    /**
     * Unmutes a previously muted host in Datadog.
     * Re-enables alerts and notifications for the specified host.
     */
    unmute_host: async (request) => {
      const { hostname } = UnmuteHostZodSchema.parse(request.params.arguments)

      await apiInstance.unmuteHost({
        hostName: hostname,
      })

      return {
        content: [
          {
            type: 'text',
            text: JSON.stringify(
              {
                status: 'success',
                message: `Host ${hostname} has been unmuted successfully`,
              },
              null,
              2,
            ),
          },
        ],
      }
    },

    /**
     * Retrieves counts of active and up hosts in Datadog.
     * Provides total counts of hosts that are reporting and operational within
     * the window given by `from` (seconds; schema default is 7200 = 2 hours).
     */
    get_active_hosts_count: async (request) => {
      const { from } = GetActiveHostsCountZodSchema.parse(
        request.params.arguments,
      )

      const response = await apiInstance.getHostTotals({
        from,
      })

      return {
        content: [
          {
            type: 'text',
            text: JSON.stringify(
              {
                total_active: response.totalActive || 0, // Total number of active hosts (UP and reporting) to Datadog
                total_up: response.totalUp || 0, // Number of hosts that are UP and reporting to Datadog
              },
              null,
              2,
            ),
          },
        ],
      }
    },

    /**
     * Lists and filters hosts monitored by Datadog.
     * Supports comprehensive querying with filtering, sorting, and pagination.
     * Returns detailed host information including status, metadata, and monitoring data.
     */
    list_hosts: async (request) => {
      const {
        filter,
        sort_field,
        sort_dir,
        start,
        count,
        from,
        include_muted_hosts_data,
        include_hosts_metadata,
      } = ListHostsZodSchema.parse(request.params.arguments)

      // snake_case tool inputs map onto the client's camelCase parameters.
      const response = await apiInstance.listHosts({
        filter,
        sortField: sort_field,
        sortDir: sort_dir,
        start,
        count,
        from,
        includeMutedHostsData: include_muted_hosts_data,
        includeHostsMetadata: include_hosts_metadata,
      })

      if (!response.hostList) {
        throw new Error('No hosts data returned')
      }

      // Transform API response into a more convenient format
      // NOTE(review): the URL hard-codes app.datadoghq.com and ignores
      // DATADOG_SITE — links will be wrong for EU/other sites; confirm intent.
      const hosts = response.hostList.map((host) => ({
        name: host.name,
        id: host.id,
        aliases: host.aliases,
        apps: host.apps,
        mute: host.isMuted,
        last_reported: host.lastReportedTime,
        meta: host.meta,
        metrics: host.metrics,
        sources: host.sources,
        up: host.up,
        url: `https://app.datadoghq.com/infrastructure?host=${host.name}`,
      }))

      return {
        content: [
          {
            type: 'text',
            text: `Hosts: ${JSON.stringify(hosts)}`,
          },
        ],
      }
    },
  }
}
210 |
--------------------------------------------------------------------------------
/src/tools/incident/index.ts:
--------------------------------------------------------------------------------
1 | export { INCIDENT_TOOLS, createIncidentToolHandlers } from './tool'
2 |
--------------------------------------------------------------------------------
/src/tools/incident/schema.ts:
--------------------------------------------------------------------------------
1 | import { z } from 'zod'
2 |
3 | export const ListIncidentsZodSchema = z.object({
4 | pageSize: z.number().min(1).max(100).default(10),
5 | pageOffset: z.number().min(0).default(0),
6 | })
7 |
8 | export const GetIncidentZodSchema = z.object({
9 | incidentId: z.string().nonempty(),
10 | })
11 |
--------------------------------------------------------------------------------
/src/tools/incident/tool.ts:
--------------------------------------------------------------------------------
1 | import { ExtendedTool, ToolHandlers } from '../../utils/types'
2 | import { v2 } from '@datadog/datadog-api-client'
3 | import { createToolSchema } from '../../utils/tool'
4 | import { GetIncidentZodSchema, ListIncidentsZodSchema } from './schema'
5 |
/** Names of the incident tools exposed by this module. */
type IncidentToolName = 'list_incidents' | 'get_incident'
/** Tool type for incident operations. */
type IncidentTool = ExtendedTool

/**
 * Tool schemas advertised to MCP clients for Datadog incident operations.
 * Input validation is derived from the Zod schemas in ./schema.
 */
export const INCIDENT_TOOLS: IncidentTool[] = [
  createToolSchema(
    ListIncidentsZodSchema,
    'list_incidents',
    'Get incidents from Datadog',
  ),
  createToolSchema(
    GetIncidentZodSchema,
    'get_incident',
    'Get an incident from Datadog',
  ),
] as const
21 |
22 | type IncidentToolHandlers = ToolHandlers
23 |
/**
 * Builds the MCP handlers for incident tools, backed by the Datadog
 * Incidents API (v2).
 *
 * @param apiInstance - Configured Datadog v2 IncidentsApi client
 * @returns Handler map keyed by tool name ('list_incidents', 'get_incident')
 */
export const createIncidentToolHandlers = (
  apiInstance: v2.IncidentsApi,
): IncidentToolHandlers => {
  return {
    list_incidents: async (request) => {
      // Validate the raw MCP arguments; zod applies the paging defaults.
      const { pageSize, pageOffset } = ListIncidentsZodSchema.parse(
        request.params.arguments,
      )

      const response = await apiInstance.listIncidents({
        pageSize,
        pageOffset,
      })

      // The client types `data` as optional; treat a missing payload as an error.
      if (response.data == null) {
        throw new Error('No incidents data returned')
      }

      return {
        content: [
          {
            type: 'text',
            // One JSON document per line, one line per incident.
            text: `Listed incidents:\n${response.data
              .map((d) => JSON.stringify(d))
              .join('\n')}`,
          },
        ],
      }
    },
    get_incident: async (request) => {
      const { incidentId } = GetIncidentZodSchema.parse(
        request.params.arguments,
      )

      const response = await apiInstance.getIncident({
        incidentId,
      })

      if (response.data == null) {
        throw new Error('No incident data returned')
      }

      return {
        content: [
          {
            type: 'text',
            text: `Incident: ${JSON.stringify(response.data)}`,
          },
        ],
      }
    },
  }
}
77 |
--------------------------------------------------------------------------------
/src/tools/logs/index.ts:
--------------------------------------------------------------------------------
1 | export { LOGS_TOOLS, createLogsToolHandlers } from './tool'
2 |
--------------------------------------------------------------------------------
/src/tools/logs/schema.ts:
--------------------------------------------------------------------------------
1 | import { z } from 'zod'
2 |
/**
 * Schema for searching and retrieving logs.
 *
 * @param query - Datadog logs query string. Defaults to '' (no filter)
 * @param from - Start time in epoch seconds
 * @param to - End time in epoch seconds
 * @param limit - Maximum number of logs to return. Default is 100.
 */
export const GetLogsZodSchema = z.object({
  query: z.string().default('').describe('Datadog logs query string'),
  from: z.number().describe('Start time in epoch seconds'),
  to: z.number().describe('End time in epoch seconds'),
  limit: z
    .number()
    .optional()
    .default(100)
    .describe('Maximum number of logs to return. Default is 100.'),
})
13 |
/**
 * Schema for retrieving all unique service names from logs.
 * Defines parameters for querying logs within a time window; the service
 * names themselves are extracted client-side by the handler from the
 * matching logs' `service` attribute.
 *
 * @param query - Optional. Additional query filter for log search. Defaults to "*" (all logs)
 * @param from - Required. Start time in epoch seconds
 * @param to - Required. End time in epoch seconds
 * @param limit - Optional. Maximum number of logs to search through. Default is 1000.
 */
export const GetAllServicesZodSchema = z.object({
  query: z
    .string()
    .default('*')
    .describe('Optional query filter for log search'),
  from: z.number().describe('Start time in epoch seconds'),
  to: z.number().describe('End time in epoch seconds'),
  limit: z
    .number()
    .optional()
    .default(1000)
    .describe('Maximum number of logs to search through. Default is 1000.'),
})
36 |
--------------------------------------------------------------------------------
/src/tools/logs/tool.ts:
--------------------------------------------------------------------------------
1 | import { ExtendedTool, ToolHandlers } from '../../utils/types'
2 | import { v2 } from '@datadog/datadog-api-client'
3 | import { createToolSchema } from '../../utils/tool'
4 | import { GetLogsZodSchema, GetAllServicesZodSchema } from './schema'
5 |
6 | type LogsToolName = 'get_logs' | 'get_all_services'
7 | type LogsTool = ExtendedTool
8 |
9 | export const LOGS_TOOLS: LogsTool[] = [
10 | createToolSchema(
11 | GetLogsZodSchema,
12 | 'get_logs',
13 | 'Search and retrieve logs from Datadog',
14 | ),
15 | createToolSchema(
16 | GetAllServicesZodSchema,
17 | 'get_all_services',
18 | 'Extract all unique service names from logs',
19 | ),
20 | ] as const
21 |
22 | type LogsToolHandlers = ToolHandlers
23 |
24 | export const createLogsToolHandlers = (
25 | apiInstance: v2.LogsApi,
26 | ): LogsToolHandlers => ({
27 | get_logs: async (request) => {
28 | const { query, from, to, limit } = GetLogsZodSchema.parse(
29 | request.params.arguments,
30 | )
31 |
32 | const response = await apiInstance.listLogs({
33 | body: {
34 | filter: {
35 | query,
36 | // `from` and `to` are in epoch seconds, but the Datadog API expects milliseconds
37 | from: `${from * 1000}`,
38 | to: `${to * 1000}`,
39 | },
40 | page: {
41 | limit,
42 | },
43 | sort: '-timestamp',
44 | },
45 | })
46 |
47 | if (response.data == null) {
48 | throw new Error('No logs data returned')
49 | }
50 |
51 | return {
52 | content: [
53 | {
54 | type: 'text',
55 | text: `Logs data: ${JSON.stringify(response.data)}`,
56 | },
57 | ],
58 | }
59 | },
60 |
61 | get_all_services: async (request) => {
62 | const { query, from, to, limit } = GetAllServicesZodSchema.parse(
63 | request.params.arguments,
64 | )
65 |
66 | const response = await apiInstance.listLogs({
67 | body: {
68 | filter: {
69 | query,
70 | // `from` and `to` are in epoch seconds, but the Datadog API expects milliseconds
71 | from: `${from * 1000}`,
72 | to: `${to * 1000}`,
73 | },
74 | page: {
75 | limit,
76 | },
77 | sort: '-timestamp',
78 | },
79 | })
80 |
81 | if (response.data == null) {
82 | throw new Error('No logs data returned')
83 | }
84 |
85 | // Extract unique services from logs
86 | const services = new Set()
87 |
88 | for (const log of response.data) {
89 | // Access service attribute from logs based on the Datadog API structure
90 | if (log.attributes && log.attributes.service) {
91 | services.add(log.attributes.service)
92 | }
93 | }
94 |
95 | return {
96 | content: [
97 | {
98 | type: 'text',
99 | text: `Services: ${JSON.stringify(Array.from(services).sort())}`,
100 | },
101 | ],
102 | }
103 | },
104 | })
105 |
--------------------------------------------------------------------------------
/src/tools/metrics/index.ts:
--------------------------------------------------------------------------------
1 | export { METRICS_TOOLS, createMetricsToolHandlers } from './tool'
2 |
--------------------------------------------------------------------------------
/src/tools/metrics/schema.ts:
--------------------------------------------------------------------------------
1 | import { z } from 'zod'
2 |
3 | export const QueryMetricsZodSchema = z.object({
4 | from: z
5 | .number()
6 | .describe(
7 | 'Start of the queried time period, seconds since the Unix epoch.',
8 | ),
9 | to: z
10 | .number()
11 | .describe('End of the queried time period, seconds since the Unix epoch.'),
12 | query: z
13 | .string()
14 | .describe('Datadog metrics query string. e.g. "avg:system.cpu.user{*}'),
15 | })
16 |
17 | export type QueryMetricsArgs = z.infer
18 |
--------------------------------------------------------------------------------
/src/tools/metrics/tool.ts:
--------------------------------------------------------------------------------
1 | import { ExtendedTool, ToolHandlers } from '../../utils/types'
2 | import { v1 } from '@datadog/datadog-api-client'
3 | import { createToolSchema } from '../../utils/tool'
4 | import { QueryMetricsZodSchema } from './schema'
5 |
6 | type MetricsToolName = 'query_metrics'
7 | type MetricsTool = ExtendedTool
8 |
9 | export const METRICS_TOOLS: MetricsTool[] = [
10 | createToolSchema(
11 | QueryMetricsZodSchema,
12 | 'query_metrics',
13 | 'Query timeseries points of metrics from Datadog',
14 | ),
15 | ] as const
16 |
17 | type MetricsToolHandlers = ToolHandlers
18 |
19 | export const createMetricsToolHandlers = (
20 | apiInstance: v1.MetricsApi,
21 | ): MetricsToolHandlers => {
22 | return {
23 | query_metrics: async (request) => {
24 | const { from, to, query } = QueryMetricsZodSchema.parse(
25 | request.params.arguments,
26 | )
27 |
28 | const response = await apiInstance.queryMetrics({
29 | from,
30 | to,
31 | query,
32 | })
33 |
34 | return {
35 | content: [
36 | {
37 | type: 'text',
38 | text: `Queried metrics data: ${JSON.stringify({ response })}`,
39 | },
40 | ],
41 | }
42 | },
43 | }
44 | }
45 |
--------------------------------------------------------------------------------
/src/tools/monitors/index.ts:
--------------------------------------------------------------------------------
1 | export { MONITORS_TOOLS, createMonitorsToolHandlers } from './tool'
2 |
--------------------------------------------------------------------------------
/src/tools/monitors/schema.ts:
--------------------------------------------------------------------------------
1 | import { z } from 'zod'
2 |
/**
 * Schema for listing monitors and their current states.
 *
 * @param groupStates - Filter monitors by state: 'alert', 'warn', 'no data', 'ok'
 * @param name - Filter monitors by name
 * @param tags - Filter monitors by tags
 */
export const GetMonitorsZodSchema = z.object({
  groupStates: z
    .array(z.enum(['alert', 'warn', 'no data', 'ok']))
    .optional()
    .describe('Filter monitors by their states'),
  name: z.string().optional().describe('Filter monitors by name'),
  tags: z.array(z.string()).optional().describe('Filter monitors by tags'),
})
11 |
--------------------------------------------------------------------------------
/src/tools/monitors/tool.ts:
--------------------------------------------------------------------------------
1 | import { ExtendedTool, ToolHandlers } from '../../utils/types'
2 | import { v1 } from '@datadog/datadog-api-client'
3 | import { createToolSchema } from '../../utils/tool'
4 | import { GetMonitorsZodSchema } from './schema'
5 | import { unreachable } from '../../utils/helper'
6 | import { UnparsedObject } from '@datadog/datadog-api-client/dist/packages/datadog-api-client-common/util.js'
7 |
8 | type MonitorsToolName = 'get_monitors'
9 | type MonitorsTool = ExtendedTool
10 |
11 | export const MONITORS_TOOLS: MonitorsTool[] = [
12 | createToolSchema(
13 | GetMonitorsZodSchema,
14 | 'get_monitors',
15 | 'Get monitors status from Datadog',
16 | ),
17 | ] as const
18 |
19 | type MonitorsToolHandlers = ToolHandlers
20 |
/**
 * Builds the MCP handlers for monitor tools, backed by the Datadog
 * Monitors API (v1).
 *
 * @param apiInstance - Configured Datadog v1 MonitorsApi client
 * @returns Handler map keyed by tool name ('get_monitors')
 */
export const createMonitorsToolHandlers = (
  apiInstance: v1.MonitorsApi,
): MonitorsToolHandlers => {
  return {
    get_monitors: async (request) => {
      const { groupStates, name, tags } = GetMonitorsZodSchema.parse(
        request.params.arguments,
      )

      // The v1 endpoint takes comma-separated strings rather than arrays.
      const response = await apiInstance.listMonitors({
        groupStates: groupStates?.join(','),
        name,
        tags: tags?.join(','),
      })

      if (response == null) {
        throw new Error('No monitors data returned')
      }

      // Project each monitor onto a compact, serializable record.
      const monitors = response.map((monitor) => ({
        name: monitor.name || '',
        id: monitor.id || 0,
        status: (monitor.overallState as string) || 'unknown',
        message: monitor.message,
        tags: monitor.tags || [],
        query: monitor.query || '',
        // `modified` is a Date-compatible value; convert to epoch seconds.
        lastUpdatedTs: monitor.modified
          ? Math.floor(new Date(monitor.modified).getTime() / 1000)
          : undefined,
      }))

      // Calculate summary
      const summary = response.reduce(
        (acc, monitor) => {
          const status = monitor.overallState
          // Skip states the client could not deserialize into the enum.
          if (status == null || status instanceof UnparsedObject) {
            return acc
          }

          switch (status) {
            case 'Alert':
              acc.alert++
              break
            case 'Warn':
              acc.warn++
              break
            case 'No Data':
              acc.noData++
              break
            case 'OK':
              acc.ok++
              break
            case 'Ignored':
              acc.ignored++
              break
            case 'Skipped':
              acc.skipped++
              break
            case 'Unknown':
              acc.unknown++
              break
            default:
              // Exhaustiveness guard: fails loudly if Datadog adds a state.
              unreachable(status)
          }
          return acc
        },
        {
          alert: 0,
          warn: 0,
          noData: 0,
          ok: 0,
          ignored: 0,
          skipped: 0,
          unknown: 0,
        },
      )

      return {
        content: [
          {
            type: 'text',
            text: `Monitors: ${JSON.stringify(monitors)}`,
          },
          {
            type: 'text',
            text: `Summary of monitors: ${JSON.stringify(summary)}`,
          },
        ],
      }
    },
  }
}
113 |
--------------------------------------------------------------------------------
/src/tools/rum/index.ts:
--------------------------------------------------------------------------------
1 | export { RUM_TOOLS, createRumToolHandlers } from './tool'
2 |
--------------------------------------------------------------------------------
/src/tools/rum/schema.ts:
--------------------------------------------------------------------------------
1 | import { z } from 'zod'
2 |
/**
 * Schema for retrieving RUM events.
 * Defines parameters for querying RUM events within a time window.
 *
 * @param query - Datadog RUM query string. Defaults to '' (no filter)
 * @param from - Start time in epoch seconds
 * @param to - End time in epoch seconds
 * @param limit - Maximum number of events to return (default: 100)
 */
export const GetRumEventsZodSchema = z.object({
  query: z.string().default('').describe('Datadog RUM query string'),
  from: z.number().describe('Start time in epoch seconds'),
  to: z.number().describe('End time in epoch seconds'),
  limit: z
    .number()
    .optional()
    .default(100)
    .describe('Maximum number of events to return. Default is 100.'),
})
22 |
/**
 * Schema for retrieving RUM applications.
 * Returns a list of all RUM applications in the organization.
 * Takes no parameters.
 */
export const GetRumApplicationsZodSchema = z.object({})
28 |
/**
 * Schema for retrieving unique user session counts.
 * Defines parameters for querying session counts within a time window.
 * The handler counts unique session IDs per value of the groupBy dimension.
 *
 * @param query - Optional. Additional query filter for RUM search. Defaults to "*" (all events)
 * @param from - Start time in epoch seconds
 * @param to - End time in epoch seconds
 * @param groupBy - Optional. Dimension to group results by (e.g., 'application.name')
 */
export const GetRumGroupedEventCountZodSchema = z.object({
  query: z
    .string()
    .default('*')
    .describe('Optional query filter for RUM search'),
  from: z.number().describe('Start time in epoch seconds'),
  to: z.number().describe('End time in epoch seconds'),
  groupBy: z
    .string()
    .optional()
    .default('application.name')
    .describe('Dimension to group results by. Default is application.name'),
})
51 |
/**
 * Schema for retrieving page performance metrics.
 * Defines parameters for querying performance metrics within a time window.
 *
 * @param query - Optional. Additional query filter for RUM search. Defaults to "*" (all events)
 * @param from - Start time in epoch seconds
 * @param to - End time in epoch seconds
 * @param metricNames - Array of metric names to retrieve. Defaults to load time,
 *   first contentful paint, and largest contentful paint.
 */
export const GetRumPagePerformanceZodSchema = z.object({
  query: z
    .string()
    .default('*')
    .describe('Optional query filter for RUM search'),
  from: z.number().describe('Start time in epoch seconds'),
  to: z.number().describe('End time in epoch seconds'),
  metricNames: z
    .array(z.string())
    .default([
      'view.load_time',
      'view.first_contentful_paint',
      'view.largest_contentful_paint',
    ])
    .describe('Array of metric names to retrieve'),
})
77 |
/**
 * Schema for retrieving RUM page waterfall data.
 * Defines parameters for filtering events by application and session.
 *
 * @param applicationName - Application name to filter events
 * @param sessionId - Session ID to filter events
 */
export const GetRumPageWaterfallZodSchema = z.object({
  applicationName: z.string().describe('Application name to filter events'),
  sessionId: z.string().describe('Session ID to filter events'),
})
91 |
--------------------------------------------------------------------------------
/src/tools/rum/tool.ts:
--------------------------------------------------------------------------------
1 | import { ExtendedTool, ToolHandlers } from '../../utils/types'
2 | import { v2 } from '@datadog/datadog-api-client'
3 | import { createToolSchema } from '../../utils/tool'
4 | import {
5 | GetRumEventsZodSchema,
6 | GetRumApplicationsZodSchema,
7 | GetRumGroupedEventCountZodSchema,
8 | GetRumPagePerformanceZodSchema,
9 | GetRumPageWaterfallZodSchema,
10 | } from './schema'
11 |
12 | type RumToolName =
13 | | 'get_rum_events'
14 | | 'get_rum_applications'
15 | | 'get_rum_grouped_event_count'
16 | | 'get_rum_page_performance'
17 | | 'get_rum_page_waterfall'
18 | type RumTool = ExtendedTool
19 |
20 | export const RUM_TOOLS: RumTool[] = [
21 | createToolSchema(
22 | GetRumApplicationsZodSchema,
23 | 'get_rum_applications',
24 | 'Get all RUM applications in the organization',
25 | ),
26 | createToolSchema(
27 | GetRumEventsZodSchema,
28 | 'get_rum_events',
29 | 'Search and retrieve RUM events from Datadog',
30 | ),
31 | createToolSchema(
32 | GetRumGroupedEventCountZodSchema,
33 | 'get_rum_grouped_event_count',
34 | 'Search, group and count RUM events by a specified dimension',
35 | ),
36 | createToolSchema(
37 | GetRumPagePerformanceZodSchema,
38 | 'get_rum_page_performance',
39 | 'Get page (view) performance metrics from RUM data',
40 | ),
41 | createToolSchema(
42 | GetRumPageWaterfallZodSchema,
43 | 'get_rum_page_waterfall',
44 | 'Retrieve RUM page (view) waterfall data filtered by application name and session ID',
45 | ),
46 | ] as const
47 |
48 | type RumToolHandlers = ToolHandlers
49 |
50 | export const createRumToolHandlers = (
51 | apiInstance: v2.RUMApi,
52 | ): RumToolHandlers => ({
53 | get_rum_applications: async (request) => {
54 | GetRumApplicationsZodSchema.parse(request.params.arguments)
55 |
56 | const response = await apiInstance.getRUMApplications()
57 |
58 | if (response.data == null) {
59 | throw new Error('No RUM applications data returned')
60 | }
61 |
62 | return {
63 | content: [
64 | {
65 | type: 'text',
66 | text: `RUM applications: ${JSON.stringify(response.data)}`,
67 | },
68 | ],
69 | }
70 | },
71 |
72 | get_rum_events: async (request) => {
73 | const { query, from, to, limit } = GetRumEventsZodSchema.parse(
74 | request.params.arguments,
75 | )
76 |
77 | const response = await apiInstance.listRUMEvents({
78 | filterQuery: query,
79 | filterFrom: new Date(from * 1000),
80 | filterTo: new Date(to * 1000),
81 | sort: 'timestamp',
82 | pageLimit: limit,
83 | })
84 |
85 | if (response.data == null) {
86 | throw new Error('No RUM events data returned')
87 | }
88 |
89 | return {
90 | content: [
91 | {
92 | type: 'text',
93 | text: `RUM events data: ${JSON.stringify(response.data)}`,
94 | },
95 | ],
96 | }
97 | },
98 |
99 | get_rum_grouped_event_count: async (request) => {
100 | const { query, from, to, groupBy } = GetRumGroupedEventCountZodSchema.parse(
101 | request.params.arguments,
102 | )
103 |
104 | // For session counts, we need to use a query to count unique sessions
105 | const response = await apiInstance.listRUMEvents({
106 | filterQuery: query !== '*' ? query : undefined,
107 | filterFrom: new Date(from * 1000),
108 | filterTo: new Date(to * 1000),
109 | sort: 'timestamp',
110 | pageLimit: 2000,
111 | })
112 |
113 | if (response.data == null) {
114 | throw new Error('No RUM events data returned')
115 | }
116 |
117 | // Extract session counts grouped by the specified dimension
118 | const sessions = new Map>()
119 |
120 | for (const event of response.data) {
121 | if (!event.attributes?.attributes) {
122 | continue
123 | }
124 |
125 | // Parse the groupBy path (e.g., 'application.id')
126 | const groupPath = groupBy.split('.') as Array<
127 | keyof typeof event.attributes.attributes
128 | >
129 |
130 | const result = getValueByPath(
131 | event.attributes.attributes,
132 | groupPath.map((path) => String(path)),
133 | )
134 | const groupValue = result.found ? String(result.value) : 'unknown'
135 |
136 | // Get or create the session set for this group
137 | if (!sessions.has(groupValue)) {
138 | sessions.set(groupValue, new Set())
139 | }
140 |
141 | // Add the session ID to the set if it exists
142 | if (event.attributes.attributes.session?.id) {
143 | sessions.get(groupValue)?.add(event.attributes.attributes.session.id)
144 | }
145 | }
146 |
147 | // Convert the map to an object with counts
148 | const sessionCounts = Object.fromEntries(
149 | Array.from(sessions.entries()).map(([key, set]) => [key, set.size]),
150 | )
151 |
152 | return {
153 | content: [
154 | {
155 | type: 'text',
156 | text: `Session counts (grouped by ${groupBy}): ${JSON.stringify(sessionCounts)}`,
157 | },
158 | ],
159 | }
160 | },
161 |
162 | get_rum_page_performance: async (request) => {
163 | const { query, from, to, metricNames } =
164 | GetRumPagePerformanceZodSchema.parse(request.params.arguments)
165 |
166 | // Build a query that focuses on view events with performance metrics
167 | const viewQuery = query !== '*' ? `@type:view ${query}` : '@type:view'
168 |
169 | const response = await apiInstance.listRUMEvents({
170 | filterQuery: viewQuery,
171 | filterFrom: new Date(from * 1000),
172 | filterTo: new Date(to * 1000),
173 | sort: 'timestamp',
174 | pageLimit: 2000,
175 | })
176 |
177 | if (response.data == null) {
178 | throw new Error('No RUM events data returned')
179 | }
180 |
181 | // Extract and calculate performance metrics
182 | const metrics: Record = metricNames.reduce(
183 | (acc, name) => {
184 | acc[name] = []
185 | return acc
186 | },
187 | {} as Record,
188 | )
189 |
190 | for (const event of response.data) {
191 | if (!event.attributes?.attributes) {
192 | continue
193 | }
194 |
195 | // Collect each requested metric if it exists
196 | for (const metricName of metricNames) {
197 | // Handle nested properties like 'view.load_time'
198 | const metricNameParts = metricName.split('.') as Array<
199 | keyof typeof event.attributes.attributes
200 | >
201 |
202 | if (event.attributes.attributes == null) {
203 | continue
204 | }
205 |
206 | const value = metricNameParts.reduce(
207 | (acc, part) => (acc ? acc[part] : undefined),
208 | event.attributes.attributes,
209 | )
210 |
211 | // If we found a numeric value, add it to the metrics
212 | if (typeof value === 'number') {
213 | metrics[metricName].push(value)
214 | }
215 | }
216 | }
217 |
218 | // Calculate statistics for each metric
219 | const results: Record<
220 | string,
221 | { avg: number; min: number; max: number; count: number }
222 | > = Object.entries(metrics).reduce(
223 | (acc, [name, values]) => {
224 | if (values.length > 0) {
225 | const sum = values.reduce((a, b) => a + b, 0)
226 | acc[name] = {
227 | avg: sum / values.length,
228 | min: Math.min(...values),
229 | max: Math.max(...values),
230 | count: values.length,
231 | }
232 | } else {
233 | acc[name] = { avg: 0, min: 0, max: 0, count: 0 }
234 | }
235 | return acc
236 | },
237 | {} as Record<
238 | string,
239 | { avg: number; min: number; max: number; count: number }
240 | >,
241 | )
242 |
243 | return {
244 | content: [
245 | {
246 | type: 'text',
247 | text: `Page performance metrics: ${JSON.stringify(results)}`,
248 | },
249 | ],
250 | }
251 | },
252 |
253 | get_rum_page_waterfall: async (request) => {
254 | const { applicationName, sessionId } = GetRumPageWaterfallZodSchema.parse(
255 | request.params.arguments,
256 | )
257 |
258 | const response = await apiInstance.listRUMEvents({
259 | filterQuery: `@application.name:${applicationName} @session.id:${sessionId}`,
260 | sort: 'timestamp',
261 | pageLimit: 2000,
262 | })
263 |
264 | if (response.data == null) {
265 | throw new Error('No RUM events data returned')
266 | }
267 |
268 | return {
269 | content: [
270 | {
271 | type: 'text',
272 | text: `Waterfall data: ${JSON.stringify(response.data)}`,
273 | },
274 | ],
275 | }
276 | },
277 | })
278 |
279 | // Get the group value using a recursive function approach
280 | const getValueByPath = (
281 | obj: Record,
282 | path: string[],
283 | index = 0,
284 | ): { value: unknown; found: boolean } => {
285 | if (index >= path.length) {
286 | return { value: obj, found: true }
287 | }
288 |
289 | const key = path[index]
290 | const typedObj = obj as Record
291 |
292 | if (typedObj[key] === undefined) {
293 | return { value: null, found: false }
294 | }
295 |
296 | return getValueByPath(
297 | typedObj[key] as Record,
298 | path,
299 | index + 1,
300 | )
301 | }
302 |
--------------------------------------------------------------------------------
/src/tools/traces/index.ts:
--------------------------------------------------------------------------------
1 | export { TRACES_TOOLS, createTracesToolHandlers } from './tool'
2 |
--------------------------------------------------------------------------------
/src/tools/traces/schema.ts:
--------------------------------------------------------------------------------
1 | import { z } from 'zod'
2 |
3 | export const ListTracesZodSchema = z.object({
4 | query: z.string().describe('Datadog APM trace query string'),
5 | from: z.number().describe('Start time in epoch seconds'),
6 | to: z.number().describe('End time in epoch seconds'),
7 | limit: z
8 | .number()
9 | .optional()
10 | .default(100)
11 | .describe('Maximum number of traces to return'),
12 | sort: z
13 | .enum(['timestamp', '-timestamp'])
14 | .optional()
15 | .default('-timestamp')
16 | .describe('Sort order for traces'),
17 | service: z.string().optional().describe('Filter by service name'),
18 | operation: z.string().optional().describe('Filter by operation name'),
19 | })
20 |
21 | export type ListTracesArgs = z.infer
22 |
--------------------------------------------------------------------------------
/src/tools/traces/tool.ts:
--------------------------------------------------------------------------------
1 | import { ExtendedTool, ToolHandlers } from '../../utils/types'
2 | import { v2 } from '@datadog/datadog-api-client'
3 | import { createToolSchema } from '../../utils/tool'
4 | import { ListTracesZodSchema } from './schema'
5 |
6 | type TracesToolName = 'list_traces'
7 | type TracesTool = ExtendedTool
8 |
9 | export const TRACES_TOOLS: TracesTool[] = [
10 | createToolSchema(
11 | ListTracesZodSchema,
12 | 'list_traces',
13 | 'Get APM traces from Datadog',
14 | ),
15 | ] as const
16 |
17 | type TracesToolHandlers = ToolHandlers
18 |
19 | export const createTracesToolHandlers = (
20 | apiInstance: v2.SpansApi,
21 | ): TracesToolHandlers => {
22 | return {
23 | list_traces: async (request) => {
24 | const {
25 | query,
26 | from,
27 | to,
28 | limit = 100,
29 | sort = '-timestamp',
30 | service,
31 | operation,
32 | } = ListTracesZodSchema.parse(request.params.arguments)
33 |
34 | const response = await apiInstance.listSpans({
35 | body: {
36 | data: {
37 | attributes: {
38 | filter: {
39 | query: [
40 | query,
41 | ...(service ? [`service:${service}`] : []),
42 | ...(operation ? [`operation:${operation}`] : []),
43 | ].join(' '),
44 | from: new Date(from * 1000).toISOString(),
45 | to: new Date(to * 1000).toISOString(),
46 | },
47 | sort: sort as 'timestamp' | '-timestamp',
48 | page: { limit },
49 | },
50 | type: 'search_request',
51 | },
52 | },
53 | })
54 |
55 | if (!response.data) {
56 | throw new Error('No traces data returned')
57 | }
58 |
59 | return {
60 | content: [
61 | {
62 | type: 'text',
63 | text: `Traces: ${JSON.stringify({
64 | traces: response.data,
65 | count: response.data.length,
66 | })}`,
67 | },
68 | ],
69 | }
70 | },
71 | }
72 | }
73 |
--------------------------------------------------------------------------------
/src/utils/datadog.ts:
--------------------------------------------------------------------------------
1 | import { client } from '@datadog/datadog-api-client'
2 |
/** Parameters accepted by {@link createDatadogConfig}. */
interface CreateDatadogConfigParams {
  /** Datadog API key (required, must be non-empty). */
  apiKeyAuth: string
  /** Datadog application key (required, must be non-empty). */
  appKeyAuth: string
  /** Optional Datadog site override, e.g. 'datadoghq.eu'. */
  site?: string
}
8 |
/**
 * Creates a Datadog client configuration using API/APP key authentication.
 *
 * @param config - API key, application key, and optional site override
 * @returns Client configuration to pass to the per-domain API classes
 * @throws Error when either key is missing or empty
 */
export function createDatadogConfig(
  config: CreateDatadogConfigParams,
): client.Configuration {
  if (!config.apiKeyAuth || !config.appKeyAuth) {
    throw new Error('Datadog API key and APP key are required')
  }
  const datadogConfig = client.createConfiguration({
    authMethods: {
      apiKeyAuth: config.apiKeyAuth,
      appKeyAuth: config.appKeyAuth,
    },
  })

  // Only override the server's `site` variable when explicitly provided.
  if (config.site != null) {
    datadogConfig.setServerVariables({
      site: config.site,
    })
  }

  // The incident endpoints are marked unstable in the client and must be
  // opted into explicitly, or calls to them are rejected.
  datadogConfig.unstableOperations = {
    'v2.listIncidents': true,
    'v2.getIncident': true,
  }

  return datadogConfig
}
35 |
36 | export function getDatadogSite(ddConfig: client.Configuration): string {
37 | const config = ddConfig.servers[0]?.getConfiguration()
38 | if (config == null) {
39 | throw new Error('Datadog site is not set')
40 | }
41 | return config.site
42 | }
43 |
--------------------------------------------------------------------------------
/src/utils/helper.ts:
--------------------------------------------------------------------------------
1 | /**
2 | * Logs a formatted message with a specified severity to stderr.
3 | *
4 | * The MCP server uses stdio transport, so using console.log might interfere with the transport.
5 | * Therefore, logging messages are written to stderr.
6 | *
7 | * @param {'info' | 'error'} severity - The severity level of the log message.
8 | * @param {...any[]} args - Additional arguments to be logged, which will be concatenated into a single string.
9 | */
10 | export function log(
11 | severity: 'info' | 'error',
12 | ...args: any[] // eslint-disable-line @typescript-eslint/no-explicit-any
13 | ) {
14 | const msg = `[${severity.toUpperCase()} ${new Date().toISOString()}] ${args.join(' ')}\n`
15 | process.stderr.write(msg)
16 | }
17 |
18 | export { version as mcpDatadogVersion } from '../../package.json'
19 |
20 | export function unreachable(value: never): never {
21 | throw new Error(`Unreachable code: ${value}`)
22 | }
23 |
--------------------------------------------------------------------------------
/src/utils/tool.ts:
--------------------------------------------------------------------------------
1 | import { Tool } from '@modelcontextprotocol/sdk/types.js'
2 | import { ZodSchema } from 'zod'
3 | import zodToJsonSchema from 'zod-to-json-schema'
4 |
5 | type JsonSchema = Record // eslint-disable-line @typescript-eslint/no-explicit-any
6 |
7 | function pickRootObjectProperty(
8 | fullSchema: JsonSchema,
9 | schemaName: string,
10 | ): {
11 | type: 'object'
12 | properties: any // eslint-disable-line @typescript-eslint/no-explicit-any
13 | required?: string[]
14 | } {
15 | const definitions = fullSchema.definitions ?? {}
16 | const root = definitions[schemaName]
17 | return {
18 | type: 'object',
19 | properties: root?.properties ?? {},
20 | required: root?.required ?? [],
21 | }
22 | }
23 |
24 | /**
25 | * Creates a tool definition object using the provided Zod schema.
26 | *
27 | * This function converts a Zod schema (acting as the single source of truth) into a JSON Schema,
28 | * extracts the relevant root object properties, and embeds them into the tool definition.
29 | * This approach avoids duplicate schema definitions and ensures type safety and consistency.
30 | *
31 | * Note: The provided name is also used as the tool's name in the Model Context Protocol.
32 | *
33 | * @param schema - The Zod schema representing the tool's parameters.
34 | * @param name - The name of the tool and the key used to extract the corresponding schema definition, and the tool's name in the Model Context Protocol.
35 | * @param description - A brief description of the tool's functionality.
36 | * @returns A tool object containing the name, description, and input JSON Schema.
37 | */
38 | export function createToolSchema(
39 | schema: ZodSchema, // eslint-disable-line @typescript-eslint/no-explicit-any
40 | name: T,
41 | description: string,
42 | ): Tool & { name: T } {
43 | return {
44 | name,
45 | description,
46 | inputSchema: pickRootObjectProperty(
47 | zodToJsonSchema(schema, { name }),
48 | name,
49 | ),
50 | }
51 | }
52 |
--------------------------------------------------------------------------------
/src/utils/types.ts:
--------------------------------------------------------------------------------
1 | import z from 'zod'
2 | import {
3 | Result,
4 | CallToolRequestSchema,
5 | Tool,
6 | } from '@modelcontextprotocol/sdk/types.js'
7 |
8 | type ToolHandler = (
9 | request: z.infer,
10 | ) => Promise
11 |
12 | export type ToolHandlers = Record
13 |
14 | export type ExtendedTool = Tool & { name: T }
15 |
--------------------------------------------------------------------------------
/tests/helpers/datadog.ts:
--------------------------------------------------------------------------------
// Base URL for Datadog API (used by the MSW request handlers in the tests)
export const baseUrl = 'https://api.datadoghq.com/api'

/** Shape of the MCP tool responses produced by the handlers under test. */
export interface DatadogToolResponse {
  content: {
    type: 'text'
    text: string
  }[]
}
10 |
--------------------------------------------------------------------------------
/tests/helpers/mock.ts:
--------------------------------------------------------------------------------
1 | interface MockToolRequest {
2 | method: 'tools/call'
3 | params: {
4 | name: string
5 | // eslint-disable-next-line @typescript-eslint/no-explicit-any
6 | arguments: Record
7 | }
8 | }
9 |
10 | export function createMockToolRequest(
11 | toolName: string,
12 | // eslint-disable-next-line @typescript-eslint/no-explicit-any
13 | args: Record,
14 | ): MockToolRequest {
15 | return {
16 | method: 'tools/call',
17 | params: {
18 | name: toolName,
19 | arguments: args,
20 | },
21 | }
22 | }
23 |
--------------------------------------------------------------------------------
/tests/helpers/msw.ts:
--------------------------------------------------------------------------------
1 | import { RequestHandler } from 'msw'
2 | import { SetupServerApi, setupServer as setupServerNode } from 'msw/node'
3 |
4 | export function setupServer(...handlers: RequestHandler[]) {
5 | const server = setupServerNode(...handlers)
6 | debugServer(server)
7 | return server
8 | }
9 |
// Starts the MSW server and wires up console logging for the request
// lifecycle so failing tests show which requests were (not) intercepted.
function debugServer(server: SetupServerApi) {
  // Enable network request debugging; warn (don't fail) on requests that
  // no registered handler matches.
  server.listen({
    onUnhandledRequest: 'warn',
  })

  // Log all requests that pass through MSW
  server.events.on('request:start', ({ request }) => {
    console.log(`[MSW] Request started: ${request.method} ${request.url}`)
  })

  server.events.on('request:match', ({ request }) => {
    console.log(`[MSW] Request matched: ${request.method} ${request.url}`)
  })

  server.events.on('request:unhandled', ({ request }) => {
    console.log(`[MSW] Request not handled: ${request.method} ${request.url}`)
  })
}
29 |
--------------------------------------------------------------------------------
/tests/setup.ts:
--------------------------------------------------------------------------------
import { afterEach, vi } from 'vitest'

// Dummy Datadog credentials so createDatadogConfig() in the test suites
// succeeds; MSW intercepts all HTTP, so no real keys are ever needed.
process.env.DATADOG_API_KEY = 'test-api-key'
process.env.DATADOG_APP_KEY = 'test-app-key'

// Reset handlers after each test
afterEach(() => {
  // server.resetHandlers()
  vi.clearAllMocks()
})
11 |
--------------------------------------------------------------------------------
/tests/tools/dashboards.test.ts:
--------------------------------------------------------------------------------
1 | import { v1 } from '@datadog/datadog-api-client'
2 | import { describe, it, expect } from 'vitest'
3 | import { createDatadogConfig } from '../../src/utils/datadog'
4 | import { createDashboardsToolHandlers } from '../../src/tools/dashboards/tool'
5 | import { createMockToolRequest } from '../helpers/mock'
6 | import { http, HttpResponse } from 'msw'
7 | import { setupServer } from '../helpers/msw'
8 | import { baseUrl, DatadogToolResponse } from '../helpers/datadog'
9 |
const dashboardEndpoint = `${baseUrl}/v1/dashboard`

describe('Dashboards Tool', () => {
  // Credentials are injected by tests/setup.ts; fail fast if they are absent.
  if (!process.env.DATADOG_API_KEY || !process.env.DATADOG_APP_KEY) {
    throw new Error('DATADOG_API_KEY and DATADOG_APP_KEY must be set')
  }

  const datadogConfig = createDatadogConfig({
    apiKeyAuth: process.env.DATADOG_API_KEY,
    appKeyAuth: process.env.DATADOG_APP_KEY,
    site: process.env.DATADOG_SITE,
  })

  const apiInstance = new v1.DashboardsApi(datadogConfig)
  const toolHandlers = createDashboardsToolHandlers(apiInstance)

  // https://docs.datadoghq.com/api/latest/dashboards/#get-all-dashboards
  describe.concurrent('list_dashboards', async () => {
    it('should list dashboards', async () => {
      const mockHandler = http.get(dashboardEndpoint, async () => {
        return HttpResponse.json({
          dashboards: [
            {
              id: 'q5j-nti-fv6',
              type: 'host_timeboard',
            },
          ],
        })
      })

      const server = setupServer(mockHandler)

      // server.boundary scopes MSW interception to this callback so the
      // concurrent tests in this file do not see each other's handlers.
      await server.boundary(async () => {
        const request = createMockToolRequest('list_dashboards', {
          name: 'test name',
          tags: ['test_tag'],
        })
        const response = (await toolHandlers.list_dashboards(
          request,
        )) as unknown as DatadogToolResponse
        expect(response.content[0].text).toContain('Dashboards')
      })()

      server.close()
    })

    it('should handle authentication errors', async () => {
      const mockHandler = http.get(dashboardEndpoint, async () => {
        return HttpResponse.json(
          { errors: ['dummy authentication error'] },
          { status: 403 },
        )
      })

      const server = setupServer(mockHandler)

      await server.boundary(async () => {
        const request = createMockToolRequest('list_dashboards', {
          name: 'test',
        })
        await expect(toolHandlers.list_dashboards(request)).rejects.toThrow(
          'dummy authentication error',
        )
      })()

      server.close()
    })

    it('should handle too many requests', async () => {
      const mockHandler = http.get(dashboardEndpoint, async () => {
        return HttpResponse.json(
          { errors: ['dummy too many requests'] },
          { status: 429 },
        )
      })

      const server = setupServer(mockHandler)

      await server.boundary(async () => {
        const request = createMockToolRequest('list_dashboards', {
          name: 'test',
        })
        await expect(toolHandlers.list_dashboards(request)).rejects.toThrow(
          'dummy too many requests',
        )
      })()

      server.close()
    })

    it('should handle unknown errors', async () => {
      const mockHandler = http.get(dashboardEndpoint, async () => {
        return HttpResponse.json(
          { errors: ['dummy unknown error'] },
          { status: 500 },
        )
      })

      const server = setupServer(mockHandler)

      await server.boundary(async () => {
        const request = createMockToolRequest('list_dashboards', {
          name: 'test',
        })
        await expect(toolHandlers.list_dashboards(request)).rejects.toThrow(
          'dummy unknown error',
        )
      })()

      server.close()
    })
  })

  // https://docs.datadoghq.com/ja/api/latest/dashboards/#get-a-dashboard
  describe.concurrent('get_dashboard', async () => {
    it('should get a dashboard', async () => {
      const dashboardId = '123456789'
      const mockHandler = http.get(
        `${dashboardEndpoint}/${dashboardId}`,
        async () => {
          return HttpResponse.json({
            id: '123456789',
            title: 'Dashboard',
            layout_type: 'ordered',
            widgets: [],
          })
        },
      )

      const server = setupServer(mockHandler)

      await server.boundary(async () => {
        const request = createMockToolRequest('get_dashboard', {
          dashboardId,
        })
        const response = (await toolHandlers.get_dashboard(
          request,
        )) as unknown as DatadogToolResponse

        expect(response.content[0].text).toContain('123456789')
        expect(response.content[0].text).toContain('Dashboard')
        expect(response.content[0].text).toContain('ordered')
      })()

      server.close()
    })

    it('should handle not found errors', async () => {
      const dashboardId = '999999999'
      const mockHandler = http.get(
        `${dashboardEndpoint}/${dashboardId}`,
        async () => {
          return HttpResponse.json({ errors: ['Not found'] }, { status: 404 })
        },
      )

      const server = setupServer(mockHandler)

      await server.boundary(async () => {
        const request = createMockToolRequest('get_dashboard', {
          dashboardId,
        })
        await expect(toolHandlers.get_dashboard(request)).rejects.toThrow(
          'Not found',
        )
      })()

      server.close()
    })

    it('should handle server errors', async () => {
      const dashboardId = '123456789'
      const mockHandler = http.get(
        `${dashboardEndpoint}/${dashboardId}`,
        async () => {
          return HttpResponse.json(
            { errors: ['Internal server error'] },
            { status: 500 },
          )
        },
      )

      const server = setupServer(mockHandler)

      await server.boundary(async () => {
        const request = createMockToolRequest('get_dashboard', {
          dashboardId,
        })
        await expect(toolHandlers.get_dashboard(request)).rejects.toThrow(
          'Internal server error',
        )
      })()

      server.close()
    })
  })
})
207 |
--------------------------------------------------------------------------------
/tests/tools/downtimes.test.ts:
--------------------------------------------------------------------------------
1 | import { v1 } from '@datadog/datadog-api-client'
2 | import { describe, it, expect } from 'vitest'
3 | import { createDatadogConfig } from '../../src/utils/datadog'
4 | import { createDowntimesToolHandlers } from '../../src/tools/downtimes/tool'
5 | import { createMockToolRequest } from '../helpers/mock'
6 | import { http, HttpResponse } from 'msw'
7 | import { setupServer } from '../helpers/msw'
8 | import { baseUrl, DatadogToolResponse } from '../helpers/datadog'
9 |
const downtimesEndpoint = `${baseUrl}/v1/downtime`

describe('Downtimes Tool', () => {
  // Credentials are injected by tests/setup.ts; fail fast if they are absent.
  if (!process.env.DATADOG_API_KEY || !process.env.DATADOG_APP_KEY) {
    throw new Error('DATADOG_API_KEY and DATADOG_APP_KEY must be set')
  }

  const datadogConfig = createDatadogConfig({
    apiKeyAuth: process.env.DATADOG_API_KEY,
    appKeyAuth: process.env.DATADOG_APP_KEY,
    site: process.env.DATADOG_SITE,
  })

  const apiInstance = new v1.DowntimesApi(datadogConfig)
  const toolHandlers = createDowntimesToolHandlers(apiInstance)

  // https://docs.datadoghq.com/api/latest/downtimes/#get-all-downtimes
  describe.concurrent('list_downtimes', async () => {
    it('should list downtimes', async () => {
      const mockHandler = http.get(downtimesEndpoint, async () => {
        return HttpResponse.json([
          {
            id: 123456789,
            active: true,
            disabled: false,
            start: 1640995100,
            end: 1640995200,
            scope: ['host:test-host'],
            message: 'Test downtime',
            monitor_id: 87654321,
            created: 1640995000,
            creator_id: 12345,
            updated_at: 1640995010,
            monitor_tags: ['env:test'],
          },
          {
            id: 987654321,
            active: false,
            disabled: false,
            start: 1641095100,
            end: 1641095200,
            scope: ['service:web'],
            message: 'Another test downtime',
            monitor_id: null,
            created: 1641095000,
            creator_id: 12345,
            updated_at: 1641095010,
            monitor_tags: ['service:web'],
          },
        ])
      })

      const server = setupServer(mockHandler)

      // server.boundary scopes MSW interception to this callback so the
      // concurrent tests in this file do not see each other's handlers.
      await server.boundary(async () => {
        const request = createMockToolRequest('list_downtimes', {
          currentOnly: true,
        })
        const response = (await toolHandlers.list_downtimes(
          request,
        )) as unknown as DatadogToolResponse

        expect(response.content[0].text).toContain('Listed downtimes:')
        expect(response.content[0].text).toContain('Test downtime')
        expect(response.content[0].text).toContain('Another test downtime')
      })()

      server.close()
    })

    it('should handle empty response', async () => {
      const mockHandler = http.get(downtimesEndpoint, async () => {
        return HttpResponse.json([])
      })

      const server = setupServer(mockHandler)

      await server.boundary(async () => {
        const request = createMockToolRequest('list_downtimes', {
          currentOnly: false,
        })
        const response = (await toolHandlers.list_downtimes(
          request,
        )) as unknown as DatadogToolResponse

        expect(response.content[0].text).toContain('Listed downtimes:')
        expect(response.content[0].text).toContain('[]')
      })()

      server.close()
    })

    it('should handle authentication errors', async () => {
      const mockHandler = http.get(downtimesEndpoint, async () => {
        return HttpResponse.json(
          { errors: ['Authentication failed'] },
          { status: 403 },
        )
      })

      const server = setupServer(mockHandler)

      await server.boundary(async () => {
        const request = createMockToolRequest('list_downtimes', {})
        await expect(toolHandlers.list_downtimes(request)).rejects.toThrow()
      })()

      server.close()
    })

    it('should handle rate limit errors', async () => {
      const mockHandler = http.get(downtimesEndpoint, async () => {
        return HttpResponse.json(
          { errors: ['Rate limit exceeded'] },
          { status: 429 },
        )
      })

      const server = setupServer(mockHandler)

      await server.boundary(async () => {
        const request = createMockToolRequest('list_downtimes', {})
        await expect(toolHandlers.list_downtimes(request)).rejects.toThrow(
          'Rate limit exceeded',
        )
      })()

      server.close()
    })
  })

  // https://docs.datadoghq.com/api/latest/downtimes/#schedule-a-downtime
  describe.concurrent('schedule_downtime', async () => {
    it('should schedule a downtime', async () => {
      const mockHandler = http.post(downtimesEndpoint, async () => {
        return HttpResponse.json({
          id: 123456789,
          active: true,
          disabled: false,
          start: 1640995100,
          end: 1640995200,
          scope: ['host:test-host'],
          message: 'Scheduled maintenance',
          monitor_id: null,
          timezone: 'UTC',
          created: 1640995000,
          creator_id: 12345,
          updated_at: 1640995000,
        })
      })

      const server = setupServer(mockHandler)

      await server.boundary(async () => {
        const request = createMockToolRequest('schedule_downtime', {
          scope: 'host:test-host',
          start: 1640995100,
          end: 1640995200,
          message: 'Scheduled maintenance',
          timezone: 'UTC',
        })
        const response = (await toolHandlers.schedule_downtime(
          request,
        )) as unknown as DatadogToolResponse

        expect(response.content[0].text).toContain('Scheduled downtime:')
        expect(response.content[0].text).toContain('123456789')
        expect(response.content[0].text).toContain('Scheduled maintenance')
      })()

      server.close()
    })

    it('should schedule a recurring downtime', async () => {
      const mockHandler = http.post(downtimesEndpoint, async () => {
        return HttpResponse.json({
          id: 123456789,
          active: true,
          disabled: false,
          message: 'Weekly maintenance',
          scope: ['service:api'],
          recurrence: {
            type: 'weeks',
            period: 1,
            week_days: ['Mon'],
          },
          created: 1640995000,
          creator_id: 12345,
          updated_at: 1640995000,
        })
      })

      const server = setupServer(mockHandler)

      // NOTE: the request uses camelCase (weekDays) while the mocked API
      // response uses snake_case (week_days), matching the client's mapping.
      await server.boundary(async () => {
        const request = createMockToolRequest('schedule_downtime', {
          scope: 'service:api',
          message: 'Weekly maintenance',
          recurrence: {
            type: 'weeks',
            period: 1,
            weekDays: ['Mon'],
          },
        })
        const response = (await toolHandlers.schedule_downtime(
          request,
        )) as unknown as DatadogToolResponse

        expect(response.content[0].text).toContain('Scheduled downtime:')
        expect(response.content[0].text).toContain('Weekly maintenance')
        expect(response.content[0].text).toContain('weeks')
        expect(response.content[0].text).toContain('Mon')
      })()

      server.close()
    })

    it('should handle validation errors', async () => {
      const mockHandler = http.post(downtimesEndpoint, async () => {
        return HttpResponse.json(
          { errors: ['Invalid scope format'] },
          { status: 400 },
        )
      })

      const server = setupServer(mockHandler)

      await server.boundary(async () => {
        const request = createMockToolRequest('schedule_downtime', {
          scope: 'invalid:format',
          start: 1640995100,
          end: 1640995200,
        })
        await expect(toolHandlers.schedule_downtime(request)).rejects.toThrow(
          'Invalid scope format',
        )
      })()

      server.close()
    })
  })

  // https://docs.datadoghq.com/api/latest/downtimes/#cancel-a-downtime
  describe.concurrent('cancel_downtime', async () => {
    it('should cancel a downtime', async () => {
      const downtimeId = 123456789
      const mockHandler = http.delete(
        `${downtimesEndpoint}/${downtimeId}`,
        async () => {
          // Cancellation returns 204 No Content on success.
          return new HttpResponse(null, { status: 204 })
        },
      )

      const server = setupServer(mockHandler)

      await server.boundary(async () => {
        const request = createMockToolRequest('cancel_downtime', {
          downtimeId,
        })
        const response = (await toolHandlers.cancel_downtime(
          request,
        )) as unknown as DatadogToolResponse

        expect(response.content[0].text).toContain(
          `Cancelled downtime with ID: ${downtimeId}`,
        )
      })()

      server.close()
    })

    it('should handle not found errors', async () => {
      const downtimeId = 999999999
      const mockHandler = http.delete(
        `${downtimesEndpoint}/${downtimeId}`,
        async () => {
          return HttpResponse.json(
            { errors: ['Downtime not found'] },
            { status: 404 },
          )
        },
      )

      const server = setupServer(mockHandler)

      await server.boundary(async () => {
        const request = createMockToolRequest('cancel_downtime', {
          downtimeId,
        })
        await expect(toolHandlers.cancel_downtime(request)).rejects.toThrow(
          'Downtime not found',
        )
      })()

      server.close()
    })

    it('should handle server errors', async () => {
      const downtimeId = 123456789
      const mockHandler = http.delete(
        `${downtimesEndpoint}/${downtimeId}`,
        async () => {
          return HttpResponse.json(
            { errors: ['Internal server error'] },
            { status: 500 },
          )
        },
      )

      const server = setupServer(mockHandler)

      await server.boundary(async () => {
        const request = createMockToolRequest('cancel_downtime', {
          downtimeId,
        })
        await expect(toolHandlers.cancel_downtime(request)).rejects.toThrow(
          'Internal server error',
        )
      })()

      server.close()
    })
  })
})
334 |
--------------------------------------------------------------------------------
/tests/tools/hosts.test.ts:
--------------------------------------------------------------------------------
1 | import { v1 } from '@datadog/datadog-api-client'
2 | import { describe, it, expect } from 'vitest'
3 | import { createDatadogConfig } from '../../src/utils/datadog'
4 | import { createHostsToolHandlers } from '../../src/tools/hosts/tool'
5 | import { createMockToolRequest } from '../helpers/mock'
6 | import { http, HttpResponse } from 'msw'
7 | import { setupServer } from '../helpers/msw'
8 | import { baseUrl, DatadogToolResponse } from '../helpers/datadog'
9 |
const hostsBaseEndpoint = `${baseUrl}/v1/hosts`
const hostBaseEndpoint = `${baseUrl}/v1/host`
const hostTotalsEndpoint = `${hostsBaseEndpoint}/totals`

describe('Hosts Tool', () => {
  // Credentials are injected by tests/setup.ts; fail fast if they are absent.
  if (!process.env.DATADOG_API_KEY || !process.env.DATADOG_APP_KEY) {
    throw new Error('DATADOG_API_KEY and DATADOG_APP_KEY must be set')
  }

  const datadogConfig = createDatadogConfig({
    apiKeyAuth: process.env.DATADOG_API_KEY,
    appKeyAuth: process.env.DATADOG_APP_KEY,
    site: process.env.DATADOG_SITE,
  })

  const apiInstance = new v1.HostsApi(datadogConfig)
  const toolHandlers = createHostsToolHandlers(apiInstance)

  // https://docs.datadoghq.com/api/latest/hosts/#get-all-hosts
  describe.concurrent('list_hosts', async () => {
    it('should list hosts with filters', async () => {
      const mockHandler = http.get(hostsBaseEndpoint, async () => {
        return HttpResponse.json({
          host_list: [
            {
              name: 'web-server-01',
              id: 12345,
              aliases: ['web-server-01.example.com'],
              apps: ['nginx', 'redis'],
              is_muted: false,
              last_reported_time: 1640995100,
              meta: {
                platform: 'linux',
                agent_version: '7.36.1',
                socket_hostname: 'web-server-01',
              },
              metrics: {
                load: 0.5,
                cpu: 45.6,
                memory: 78.2,
              },
              sources: ['agent'],
              up: true,
            },
            {
              name: 'db-server-01',
              id: 67890,
              aliases: ['db-server-01.example.com'],
              apps: ['postgres'],
              is_muted: true,
              last_reported_time: 1640995000,
              meta: {
                platform: 'linux',
                agent_version: '7.36.1',
                socket_hostname: 'db-server-01',
              },
              metrics: {
                load: 1.2,
                cpu: 78.3,
                memory: 92.1,
              },
              sources: ['agent'],
              up: true,
            },
          ],
          total_matching: 2,
          total_returned: 2,
        })
      })

      const server = setupServer(mockHandler)

      // server.boundary scopes MSW interception to this callback so the
      // concurrent tests in this file do not see each other's handlers.
      await server.boundary(async () => {
        const request = createMockToolRequest('list_hosts', {
          filter: 'env:production',
          sort_field: 'status',
          sort_dir: 'desc',
          include_hosts_metadata: true,
        })
        const response = (await toolHandlers.list_hosts(
          request,
        )) as unknown as DatadogToolResponse

        expect(response.content[0].text).toContain('Hosts:')
        expect(response.content[0].text).toContain('web-server-01')
        expect(response.content[0].text).toContain('db-server-01')
        expect(response.content[0].text).toContain('postgres')
      })()

      server.close()
    })

    it('should handle empty response', async () => {
      const mockHandler = http.get(hostsBaseEndpoint, async () => {
        return HttpResponse.json({
          host_list: [],
          total_matching: 0,
          total_returned: 0,
        })
      })

      const server = setupServer(mockHandler)

      await server.boundary(async () => {
        const request = createMockToolRequest('list_hosts', {
          filter: 'non-existent:value',
        })
        const response = (await toolHandlers.list_hosts(
          request,
        )) as unknown as DatadogToolResponse

        expect(response.content[0].text).toContain('Hosts: []')
      })()

      server.close()
    })

    it('should handle missing host_list', async () => {
      // A response with no host_list key at all is treated as an error by
      // the handler, not as an empty list.
      const mockHandler = http.get(hostsBaseEndpoint, async () => {
        return HttpResponse.json({
          total_matching: 0,
          total_returned: 0,
        })
      })

      const server = setupServer(mockHandler)

      await server.boundary(async () => {
        const request = createMockToolRequest('list_hosts', {})
        await expect(toolHandlers.list_hosts(request)).rejects.toThrow(
          'No hosts data returned',
        )
      })()

      server.close()
    })

    it('should handle authentication errors', async () => {
      const mockHandler = http.get(hostsBaseEndpoint, async () => {
        return HttpResponse.json(
          { errors: ['Authentication failed'] },
          { status: 403 },
        )
      })

      const server = setupServer(mockHandler)

      await server.boundary(async () => {
        const request = createMockToolRequest('list_hosts', {})
        await expect(toolHandlers.list_hosts(request)).rejects.toThrow()
      })()

      server.close()
    })
  })

  // https://docs.datadoghq.com/api/latest/hosts/#get-the-total-number-of-active-hosts
  describe.concurrent('get_active_hosts_count', async () => {
    it('should get active hosts count', async () => {
      const mockHandler = http.get(hostTotalsEndpoint, async () => {
        return HttpResponse.json({
          total_up: 512,
          total_active: 520,
        })
      })

      const server = setupServer(mockHandler)

      await server.boundary(async () => {
        const request = createMockToolRequest('get_active_hosts_count', {
          from: 3600,
        })
        const response = (await toolHandlers.get_active_hosts_count(
          request,
        )) as unknown as DatadogToolResponse

        expect(response.content[0].text).toContain('total_active')
        expect(response.content[0].text).toContain('520')
        expect(response.content[0].text).toContain('total_up')
        expect(response.content[0].text).toContain('512')
      })()

      server.close()
    })

    it('should use default from value if not provided', async () => {
      const mockHandler = http.get(hostTotalsEndpoint, async () => {
        return HttpResponse.json({
          total_up: 510,
          total_active: 518,
        })
      })

      const server = setupServer(mockHandler)

      await server.boundary(async () => {
        const request = createMockToolRequest('get_active_hosts_count', {})
        const response = (await toolHandlers.get_active_hosts_count(
          request,
        )) as unknown as DatadogToolResponse

        expect(response.content[0].text).toContain('518')
        expect(response.content[0].text).toContain('510')
      })()

      server.close()
    })

    it('should handle server errors', async () => {
      const mockHandler = http.get(hostTotalsEndpoint, async () => {
        return HttpResponse.json(
          { errors: ['Internal server error'] },
          { status: 500 },
        )
      })

      const server = setupServer(mockHandler)

      await server.boundary(async () => {
        const request = createMockToolRequest('get_active_hosts_count', {})
        await expect(
          toolHandlers.get_active_hosts_count(request),
        ).rejects.toThrow()
      })()

      server.close()
    })
  })

  // https://docs.datadoghq.com/api/latest/hosts/#mute-a-host
  describe.concurrent('mute_host', async () => {
    it('should mute a host', async () => {
      // ':hostname' is an MSW path parameter; the mock echoes it back.
      const mockHandler = http.post(
        `${hostBaseEndpoint}/:hostname/mute`,
        async ({ params }) => {
          return HttpResponse.json({
            action: 'muted',
            hostname: params.hostname,
            message: 'Maintenance in progress',
            end: 1641095100,
          })
        },
      )

      const server = setupServer(mockHandler)

      await server.boundary(async () => {
        const request = createMockToolRequest('mute_host', {
          hostname: 'test-host',
          message: 'Maintenance in progress',
          end: 1641095100,
          override: true,
        })
        const response = (await toolHandlers.mute_host(
          request,
        )) as unknown as DatadogToolResponse

        expect(response.content[0].text).toContain('success')
        expect(response.content[0].text).toContain('test-host')
        expect(response.content[0].text).toContain('Maintenance in progress')
      })()

      server.close()
    })

    it('should handle host not found', async () => {
      const mockHandler = http.post(
        `${hostBaseEndpoint}/:hostname/mute`,
        async () => {
          return HttpResponse.json(
            { errors: ['Host not found'] },
            { status: 404 },
          )
        },
      )

      const server = setupServer(mockHandler)

      await server.boundary(async () => {
        const request = createMockToolRequest('mute_host', {
          hostname: 'non-existent-host',
        })
        await expect(toolHandlers.mute_host(request)).rejects.toThrow(
          'Host not found',
        )
      })()

      server.close()
    })
  })

  // https://docs.datadoghq.com/api/latest/hosts/#unmute-a-host
  describe.concurrent('unmute_host', async () => {
    it('should unmute a host', async () => {
      const mockHandler = http.post(
        `${hostBaseEndpoint}/:hostname/unmute`,
        async ({ params }) => {
          return HttpResponse.json({
            action: 'unmuted',
            hostname: params.hostname,
          })
        },
      )

      const server = setupServer(mockHandler)

      await server.boundary(async () => {
        const request = createMockToolRequest('unmute_host', {
          hostname: 'test-host',
        })
        const response = (await toolHandlers.unmute_host(
          request,
        )) as unknown as DatadogToolResponse

        expect(response.content[0].text).toContain('success')
        expect(response.content[0].text).toContain('test-host')
        expect(response.content[0].text).toContain('unmuted')
      })()

      server.close()
    })

    it('should handle host not found', async () => {
      const mockHandler = http.post(
        `${hostBaseEndpoint}/:hostname/unmute`,
        async () => {
          return HttpResponse.json(
            { errors: ['Host not found'] },
            { status: 404 },
          )
        },
      )

      const server = setupServer(mockHandler)

      await server.boundary(async () => {
        const request = createMockToolRequest('unmute_host', {
          hostname: 'non-existent-host',
        })
        await expect(toolHandlers.unmute_host(request)).rejects.toThrow(
          'Host not found',
        )
      })()

      server.close()
    })

    it('should handle host already unmuted', async () => {
      const mockHandler = http.post(
        `${hostBaseEndpoint}/:hostname/unmute`,
        async () => {
          return HttpResponse.json(
            { errors: ['Host is not muted'] },
            { status: 400 },
          )
        },
      )

      const server = setupServer(mockHandler)

      await server.boundary(async () => {
        const request = createMockToolRequest('unmute_host', {
          hostname: 'already-unmuted-host',
        })
        await expect(toolHandlers.unmute_host(request)).rejects.toThrow(
          'Host is not muted',
        )
      })()

      server.close()
    })
  })
})
383 |
--------------------------------------------------------------------------------
/tests/tools/incident.test.ts:
--------------------------------------------------------------------------------
1 | import { v2 } from '@datadog/datadog-api-client'
2 | import { describe, it, expect } from 'vitest'
3 | import { createDatadogConfig } from '../../src/utils/datadog'
4 | import { createIncidentToolHandlers } from '../../src/tools/incident/tool'
5 | import { createMockToolRequest } from '../helpers/mock'
6 | import { http, HttpResponse } from 'msw'
7 | import { setupServer } from '../helpers/msw'
8 | import { baseUrl, DatadogToolResponse } from '../helpers/datadog'
9 |
// Incidents live under the v2 API (unlike the v1 endpoints used elsewhere).
const incidentsEndpoint = `${baseUrl}/v2/incidents`
11 |
12 | describe('Incident Tool', () => {
13 | if (!process.env.DATADOG_API_KEY || !process.env.DATADOG_APP_KEY) {
14 | throw new Error('DATADOG_API_KEY and DATADOG_APP_KEY must be set')
15 | }
16 |
17 | const datadogConfig = createDatadogConfig({
18 | apiKeyAuth: process.env.DATADOG_API_KEY,
19 | appKeyAuth: process.env.DATADOG_APP_KEY,
20 | site: process.env.DATADOG_SITE,
21 | })
22 |
23 | const apiInstance = new v2.IncidentsApi(datadogConfig)
24 | const toolHandlers = createIncidentToolHandlers(apiInstance)
25 |
26 | // https://docs.datadoghq.com/api/latest/incidents/#get-a-list-of-incidents
27 | describe.concurrent('list_incidents', async () => {
28 | it('should list incidents with pagination', async () => {
29 | const mockHandler = http.get(incidentsEndpoint, async () => {
30 | return HttpResponse.json({
31 | data: [
32 | {
33 | id: 'incident-123',
34 | type: 'incidents',
35 | attributes: {
36 | title: 'API Outage',
37 | created: '2023-01-15T10:00:00.000Z',
38 | modified: '2023-01-15T11:30:00.000Z',
39 | status: 'active',
40 | severity: 'SEV-1',
41 | customer_impact_scope: 'All API services are down',
42 | customer_impact_start: '2023-01-15T10:00:00.000Z',
43 | customer_impacted: true,
44 | },
45 | relationships: {
46 | created_by: {
47 | data: {
48 | id: 'user-123',
49 | type: 'users',
50 | },
51 | },
52 | },
53 | },
54 | {
55 | id: 'incident-456',
56 | type: 'incidents',
57 | attributes: {
58 | title: 'Database Slowdown',
59 | created: '2023-01-10T09:00:00.000Z',
60 | modified: '2023-01-10T12:00:00.000Z',
61 | status: 'resolved',
62 | severity: 'SEV-2',
63 | customer_impact_scope: 'Database queries are slow',
64 | customer_impact_start: '2023-01-10T09:00:00.000Z',
65 | customer_impact_end: '2023-01-10T12:00:00.000Z',
66 | customer_impacted: true,
67 | },
68 | relationships: {
69 | created_by: {
70 | data: {
71 | id: 'user-456',
72 | type: 'users',
73 | },
74 | },
75 | },
76 | },
77 | ],
78 | meta: {
79 | pagination: {
80 | offset: 10,
81 | size: 20,
82 | total: 45,
83 | },
84 | },
85 | })
86 | })
87 |
88 | const server = setupServer(mockHandler)
89 |
90 | await server.boundary(async () => {
91 | const request = createMockToolRequest('list_incidents', {
92 | pageSize: 20,
93 | pageOffset: 10,
94 | })
95 | const response = (await toolHandlers.list_incidents(
96 | request,
97 | )) as unknown as DatadogToolResponse
98 |
99 | expect(response.content[0].text).toContain('Listed incidents:')
100 | expect(response.content[0].text).toContain('API Outage')
101 | expect(response.content[0].text).toContain('Database Slowdown')
102 | expect(response.content[0].text).toContain('incident-123')
103 | expect(response.content[0].text).toContain('incident-456')
104 | })()
105 |
106 | server.close()
107 | })
108 |
109 | it('should use default pagination parameters if not provided', async () => {
110 | const mockHandler = http.get(incidentsEndpoint, async () => {
111 | return HttpResponse.json({
112 | data: [
113 | {
114 | id: 'incident-789',
115 | type: 'incidents',
116 | attributes: {
117 | title: 'Network Connectivity Issues',
118 | status: 'active',
119 | },
120 | },
121 | ],
122 | meta: {
123 | pagination: {
124 | offset: 0,
125 | size: 10,
126 | total: 1,
127 | },
128 | },
129 | })
130 | })
131 |
132 | const server = setupServer(mockHandler)
133 |
134 | await server.boundary(async () => {
135 | const request = createMockToolRequest('list_incidents', {})
136 | const response = (await toolHandlers.list_incidents(
137 | request,
138 | )) as unknown as DatadogToolResponse
139 |
140 | expect(response.content[0].text).toContain('Listed incidents:')
141 | expect(response.content[0].text).toContain(
142 | 'Network Connectivity Issues',
143 | )
144 | })()
145 |
146 | server.close()
147 | })
148 |
149 | it('should handle empty response', async () => {
150 | const mockHandler = http.get(incidentsEndpoint, async () => {
151 | return HttpResponse.json({
152 | data: [],
153 | meta: {
154 | pagination: {
155 | offset: 0,
156 | size: 10,
157 | total: 0,
158 | },
159 | },
160 | })
161 | })
162 |
163 | const server = setupServer(mockHandler)
164 |
165 | await server.boundary(async () => {
166 | const request = createMockToolRequest('list_incidents', {})
167 | const response = (await toolHandlers.list_incidents(
168 | request,
169 | )) as unknown as DatadogToolResponse
170 |
171 | expect(response.content[0].text).toContain('Listed incidents:')
172 | expect(response.content[0].text).not.toContain('incident-')
173 | })()
174 |
175 | server.close()
176 | })
177 |
178 | it('should handle null data response', async () => {
179 | const mockHandler = http.get(incidentsEndpoint, async () => {
180 | return HttpResponse.json({
181 | data: null,
182 | meta: {
183 | pagination: {
184 | offset: 0,
185 | size: 10,
186 | total: 0,
187 | },
188 | },
189 | })
190 | })
191 |
192 | const server = setupServer(mockHandler)
193 |
194 | await server.boundary(async () => {
195 | const request = createMockToolRequest('list_incidents', {})
196 | await expect(toolHandlers.list_incidents(request)).rejects.toThrow(
197 | 'No incidents data returned',
198 | )
199 | })()
200 |
201 | server.close()
202 | })
203 |
204 | it('should handle authentication errors', async () => {
205 | const mockHandler = http.get(incidentsEndpoint, async () => {
206 | return HttpResponse.json(
207 | { errors: ['Authentication failed'] },
208 | { status: 403 },
209 | )
210 | })
211 |
212 | const server = setupServer(mockHandler)
213 |
214 | await server.boundary(async () => {
215 | const request = createMockToolRequest('list_incidents', {})
216 | await expect(toolHandlers.list_incidents(request)).rejects.toThrow()
217 | })()
218 |
219 | server.close()
220 | })
221 | })
222 |
223 | // https://docs.datadoghq.com/api/latest/incidents/#get-incident-details
224 | describe.concurrent('get_incident', async () => {
225 | it('should get a specific incident', async () => {
226 | const incidentId = 'incident-123'
227 | const specificIncidentEndpoint = `${incidentsEndpoint}/${incidentId}`
228 |
229 | const mockHandler = http.get(specificIncidentEndpoint, async () => {
230 | return HttpResponse.json({
231 | data: {
232 | id: 'incident-123',
233 | type: 'incidents',
234 | attributes: {
235 | title: 'API Outage',
236 | created: '2023-01-15T10:00:00.000Z',
237 | modified: '2023-01-15T11:30:00.000Z',
238 | status: 'active',
239 | severity: 'SEV-1',
240 | customer_impact_scope: 'All API services are down',
241 | customer_impact_start: '2023-01-15T10:00:00.000Z',
242 | customer_impacted: true,
243 | fields: {
244 | summary: 'Complete API outage affecting all customers',
245 | root_cause: 'Database connection pool exhausted',
246 | detection_method: 'Monitor alert',
247 | services: ['api', 'database'],
248 | teams: ['backend', 'sre'],
249 | },
250 | timeline: {
251 | entries: [
252 | {
253 | timestamp: '2023-01-15T10:00:00.000Z',
254 | content: 'Incident detected',
255 | type: 'incident_created',
256 | },
257 | {
258 | timestamp: '2023-01-15T10:05:00.000Z',
259 | content: 'Investigation started',
260 | type: 'comment',
261 | },
262 | ],
263 | },
264 | },
265 | relationships: {
266 | created_by: {
267 | data: {
268 | id: 'user-123',
269 | type: 'users',
270 | },
271 | },
272 | commander: {
273 | data: {
274 | id: 'user-456',
275 | type: 'users',
276 | },
277 | },
278 | },
279 | },
280 | })
281 | })
282 |
283 | const server = setupServer(mockHandler)
284 |
285 | await server.boundary(async () => {
286 | const request = createMockToolRequest('get_incident', {
287 | incidentId: 'incident-123',
288 | })
289 | const response = (await toolHandlers.get_incident(
290 | request,
291 | )) as unknown as DatadogToolResponse
292 |
293 | expect(response.content[0].text).toContain('Incident:')
294 | expect(response.content[0].text).toContain('API Outage')
295 | expect(response.content[0].text).toContain('incident-123')
296 | expect(response.content[0].text).toContain('SEV-1')
297 | expect(response.content[0].text).toContain(
298 | 'Database connection pool exhausted',
299 | )
300 | })()
301 |
302 | server.close()
303 | })
304 |
305 | it('should handle incident not found', async () => {
306 | const incidentId = 'non-existent-incident'
307 | const specificIncidentEndpoint = `${incidentsEndpoint}/${incidentId}`
308 |
309 | const mockHandler = http.get(specificIncidentEndpoint, async () => {
310 | return HttpResponse.json(
311 | { errors: ['Incident not found'] },
312 | { status: 404 },
313 | )
314 | })
315 |
316 | const server = setupServer(mockHandler)
317 |
318 | await server.boundary(async () => {
319 | const request = createMockToolRequest('get_incident', {
320 | incidentId: 'non-existent-incident',
321 | })
322 | await expect(toolHandlers.get_incident(request)).rejects.toThrow(
323 | 'Incident not found',
324 | )
325 | })()
326 |
327 | server.close()
328 | })
329 |
330 | it('should handle null data response', async () => {
331 | const incidentId = 'incident-123'
332 | const specificIncidentEndpoint = `${incidentsEndpoint}/${incidentId}`
333 |
334 | const mockHandler = http.get(specificIncidentEndpoint, async () => {
335 | return HttpResponse.json({
336 | data: null,
337 | })
338 | })
339 |
340 | const server = setupServer(mockHandler)
341 |
342 | await server.boundary(async () => {
343 | const request = createMockToolRequest('get_incident', {
344 | incidentId: 'incident-123',
345 | })
346 | await expect(toolHandlers.get_incident(request)).rejects.toThrow(
347 | 'No incident data returned',
348 | )
349 | })()
350 |
351 | server.close()
352 | })
353 |
354 | it('should handle server errors', async () => {
355 | const incidentId = 'incident-123'
356 | const specificIncidentEndpoint = `${incidentsEndpoint}/${incidentId}`
357 |
358 | const mockHandler = http.get(specificIncidentEndpoint, async () => {
359 | return HttpResponse.json(
360 | { errors: ['Internal server error'] },
361 | { status: 500 },
362 | )
363 | })
364 |
365 | const server = setupServer(mockHandler)
366 |
367 | await server.boundary(async () => {
368 | const request = createMockToolRequest('get_incident', {
369 | incidentId: 'incident-123',
370 | })
371 | await expect(toolHandlers.get_incident(request)).rejects.toThrow(
372 | 'Internal server error',
373 | )
374 | })()
375 |
376 | server.close()
377 | })
378 | })
379 | })
380 |
--------------------------------------------------------------------------------
/tests/tools/logs.test.ts:
--------------------------------------------------------------------------------
1 | import { v2 } from '@datadog/datadog-api-client'
2 | import { describe, it, expect } from 'vitest'
3 | import { createDatadogConfig } from '../../src/utils/datadog'
4 | import { createLogsToolHandlers } from '../../src/tools/logs/tool'
5 | import { createMockToolRequest } from '../helpers/mock'
6 | import { http, HttpResponse } from 'msw'
7 | import { setupServer } from '../helpers/msw'
8 | import { baseUrl, DatadogToolResponse } from '../helpers/datadog'
9 |
10 | const logsEndpoint = `${baseUrl}/v2/logs/events/search`
11 |
12 | describe('Logs Tool', () => { // Exercises the logs tool handlers against msw-mocked Datadog endpoints; no real API traffic
13 |   if (!process.env.DATADOG_API_KEY || !process.env.DATADOG_APP_KEY) { // keys must merely be present — their values are never validated by the mocks
14 |     throw new Error('DATADOG_API_KEY and DATADOG_APP_KEY must be set')
15 |   }
16 |
17 |   const datadogConfig = createDatadogConfig({
18 |     apiKeyAuth: process.env.DATADOG_API_KEY,
19 |     appKeyAuth: process.env.DATADOG_APP_KEY,
20 |     site: process.env.DATADOG_SITE,
21 |   })
22 |
23 |   const apiInstance = new v2.LogsApi(datadogConfig)
24 |   const toolHandlers = createLogsToolHandlers(apiInstance)
25 |
26 |   // https://docs.datadoghq.com/api/latest/logs/#search-logs
27 |   describe.concurrent('get_logs', async () => { // NOTE(review): 'async' on a describe factory looks unnecessary — vitest collects suites synchronously; confirm before removing
28 |     it('should retrieve logs', async () => {
29 |       // Mock API response based on Datadog API documentation
30 |       const mockHandler = http.post(logsEndpoint, async () => {
31 |         return HttpResponse.json({
32 |           data: [
33 |             {
34 |               id: 'AAAAAXGLdD0AAABPV-5whqgB',
35 |               attributes: {
36 |                 timestamp: 1640995199999,
37 |                 status: 'info',
38 |                 message: 'Test log message',
39 |                 service: 'test-service',
40 |                 tags: ['env:test'],
41 |               },
42 |               type: 'log',
43 |             },
44 |           ],
45 |           meta: {
46 |             page: {
47 |               after:
48 |                 'eyJzdGFydEF0IjoiQVFBQUFYR0xkRDBBQUFCUFYtNXdocWdCIiwiaW5kZXgiOiJtYWluIn0=',
49 |             },
50 |           },
51 |         })
52 |       })
53 |
54 |       const server = setupServer(mockHandler)
55 |
56 |       await server.boundary(async () => { // boundary() scopes the msw handlers to this concurrent test
57 |         const request = createMockToolRequest('get_logs', {
58 |           query: 'service:test-service',
59 |           from: 1640995100, // epoch seconds
60 |           to: 1640995200, // epoch seconds
61 |           limit: 10,
62 |         })
63 |         const response = (await toolHandlers.get_logs(
64 |           request,
65 |         )) as unknown as DatadogToolResponse
66 |         expect(response.content[0].text).toContain('Logs data')
67 |         expect(response.content[0].text).toContain('Test log message')
68 |       })()
69 |
70 |       server.close() // NOTE(review): skipped if an assertion above throws — consider try/finally
71 |     })
72 |
73 |     it('should handle empty response', async () => {
74 |       const mockHandler = http.post(logsEndpoint, async () => {
75 |         return HttpResponse.json({
76 |           data: [],
77 |           meta: {
78 |             page: {},
79 |           },
80 |         })
81 |       })
82 |
83 |       const server = setupServer(mockHandler)
84 |
85 |       await server.boundary(async () => {
86 |         const request = createMockToolRequest('get_logs', {
87 |           query: 'service:non-existent',
88 |           from: 1640995100,
89 |           to: 1640995200,
90 |         })
91 |         const response = (await toolHandlers.get_logs(
92 |           request,
93 |         )) as unknown as DatadogToolResponse
94 |         expect(response.content[0].text).toContain('Logs data')
95 |         expect(response.content[0].text).toContain('[]')
96 |       })()
97 |
98 |       server.close()
99 |     })
100 |
101 |     it('should handle null response data', async () => {
102 |       const mockHandler = http.post(logsEndpoint, async () => {
103 |         return HttpResponse.json({
104 |           data: null,
105 |           meta: {
106 |             page: {},
107 |           },
108 |         })
109 |       })
110 |
111 |       const server = setupServer(mockHandler)
112 |
113 |       await server.boundary(async () => {
114 |         const request = createMockToolRequest('get_logs', {
115 |           query: 'service:test',
116 |           from: 1640995100,
117 |           to: 1640995200,
118 |         })
119 |         await expect(toolHandlers.get_logs(request)).rejects.toThrow(
120 |           'No logs data returned',
121 |         )
122 |       })()
123 |
124 |       server.close()
125 |     })
126 |
127 |     it('should handle authentication errors', async () => {
128 |       const mockHandler = http.post(logsEndpoint, async () => {
129 |         return HttpResponse.json(
130 |           { errors: ['Authentication failed'] },
131 |           { status: 403 },
132 |         )
133 |       })
134 |
135 |       const server = setupServer(mockHandler)
136 |
137 |       await server.boundary(async () => {
138 |         const request = createMockToolRequest('get_logs', {
139 |           query: 'service:test',
140 |           from: 1640995100,
141 |           to: 1640995200,
142 |         })
143 |         await expect(toolHandlers.get_logs(request)).rejects.toThrow()
144 |       })()
145 |
146 |       server.close()
147 |     })
148 |
149 |     it('should handle rate limit errors', async () => {
150 |       const mockHandler = http.post(logsEndpoint, async () => {
151 |         return HttpResponse.json(
152 |           { errors: ['Rate limit exceeded'] },
153 |           { status: 429 },
154 |         )
155 |       })
156 |
157 |       const server = setupServer(mockHandler)
158 |
159 |       await server.boundary(async () => {
160 |         const request = createMockToolRequest('get_logs', {
161 |           query: 'service:test',
162 |           from: 1640995100,
163 |           to: 1640995200,
164 |         })
165 |         await expect(toolHandlers.get_logs(request)).rejects.toThrow(
166 |           'Rate limit exceeded',
167 |         )
168 |       })()
169 |
170 |       server.close()
171 |     })
172 |
173 |     it('should handle server errors', async () => {
174 |       const mockHandler = http.post(logsEndpoint, async () => {
175 |         return HttpResponse.json(
176 |           { errors: ['Internal server error'] },
177 |           { status: 500 },
178 |         )
179 |       })
180 |
181 |       const server = setupServer(mockHandler)
182 |
183 |       await server.boundary(async () => {
184 |         const request = createMockToolRequest('get_logs', {
185 |           query: 'service:test',
186 |           from: 1640995100,
187 |           to: 1640995200,
188 |         })
189 |         await expect(toolHandlers.get_logs(request)).rejects.toThrow(
190 |           'Internal server error',
191 |         )
192 |       })()
193 |
194 |       server.close()
195 |     })
196 |   })
197 |
198 |   describe.concurrent('get_all_services', async () => {
199 |     it('should extract unique service names from logs', async () => {
200 |       // Mock API response with multiple services
201 |       const mockHandler = http.post(logsEndpoint, async () => {
202 |         return HttpResponse.json({
203 |           data: [
204 |             {
205 |               id: 'AAAAAXGLdD0AAABPV-5whqgB',
206 |               attributes: {
207 |                 timestamp: 1640995199000,
208 |                 status: 'info',
209 |                 message: 'Test log message 1',
210 |                 service: 'web-service',
211 |                 tags: ['env:test'],
212 |               },
213 |               type: 'log',
214 |             },
215 |             {
216 |               id: 'AAAAAXGLdD0AAABPV-5whqgC',
217 |               attributes: {
218 |                 timestamp: 1640995198000,
219 |                 status: 'info',
220 |                 message: 'Test log message 2',
221 |                 service: 'api-service',
222 |                 tags: ['env:test'],
223 |               },
224 |               type: 'log',
225 |             },
226 |             {
227 |               id: 'AAAAAXGLdD0AAABPV-5whqgD',
228 |               attributes: {
229 |                 timestamp: 1640995197000,
230 |                 status: 'info',
231 |                 message: 'Test log message 3',
232 |                 service: 'web-service', // Duplicate service to test uniqueness
233 |                 tags: ['env:test'],
234 |               },
235 |               type: 'log',
236 |             },
237 |             {
238 |               id: 'AAAAAXGLdD0AAABPV-5whqgE',
239 |               attributes: {
240 |                 timestamp: 1640995196000,
241 |                 status: 'error',
242 |                 message: 'Test error message',
243 |                 service: 'database-service',
244 |                 tags: ['env:test'],
245 |               },
246 |               type: 'log',
247 |             },
248 |           ],
249 |           meta: {
250 |             page: {},
251 |           },
252 |         })
253 |       })
254 |
255 |       const server = setupServer(mockHandler)
256 |
257 |       await server.boundary(async () => {
258 |         const request = createMockToolRequest('get_all_services', {
259 |           query: '*',
260 |           from: 1640995100, // epoch seconds
261 |           to: 1640995200, // epoch seconds
262 |           limit: 100,
263 |         })
264 |         const response = (await toolHandlers.get_all_services(
265 |           request,
266 |         )) as unknown as DatadogToolResponse
267 |
268 |         expect(response.content[0].text).toContain('Services')
269 |         // Check if response contains the expected services (sorted alphabetically)
270 |         const expected = ['api-service', 'database-service', 'web-service']
271 |         expected.forEach((service) => {
272 |           expect(response.content[0].text).toContain(service)
273 |         })
274 |
275 |         // Check that we've extracted unique services (no duplicates)
276 |         const servicesText = response.content[0].text
277 |         const servicesJson = JSON.parse( // pull the JSON array embedded in the human-readable response text
278 |           servicesText.substring(
279 |             servicesText.indexOf('['),
280 |             servicesText.lastIndexOf(']') + 1,
281 |           ),
282 |         )
283 |         expect(servicesJson).toHaveLength(3) // Only 3 unique services, not 4
284 |         expect(servicesJson).toEqual(expected)
285 |       })()
286 |
287 |       server.close()
288 |     })
289 |
290 |     it('should handle logs with missing service attributes', async () => {
291 |       const mockHandler = http.post(logsEndpoint, async () => {
292 |         return HttpResponse.json({
293 |           data: [
294 |             {
295 |               id: 'AAAAAXGLdD0AAABPV-5whqgB',
296 |               attributes: {
297 |                 timestamp: 1640995199000,
298 |                 status: 'info',
299 |                 message: 'Test log message 1',
300 |                 service: 'web-service',
301 |                 tags: ['env:test'],
302 |               },
303 |               type: 'log',
304 |             },
305 |             {
306 |               id: 'AAAAAXGLdD0AAABPV-5whqgC',
307 |               attributes: {
308 |                 timestamp: 1640995198000,
309 |                 status: 'info',
310 |                 message: 'Test log message with no service',
311 |                 // No service attribute
312 |                 tags: ['env:test'],
313 |               },
314 |               type: 'log',
315 |             },
316 |           ],
317 |           meta: {
318 |             page: {},
319 |           },
320 |         })
321 |       })
322 |
323 |       const server = setupServer(mockHandler)
324 |
325 |       await server.boundary(async () => {
326 |         const request = createMockToolRequest('get_all_services', {
327 |           query: '*',
328 |           from: 1640995100,
329 |           to: 1640995200,
330 |           limit: 100,
331 |         })
332 |         const response = (await toolHandlers.get_all_services(
333 |           request,
334 |         )) as unknown as DatadogToolResponse
335 |
336 |         expect(response.content[0].text).toContain('Services')
337 |         expect(response.content[0].text).toContain('web-service')
338 |
339 |         // Ensure we only have one service (the one with a defined service attribute)
340 |         const servicesText = response.content[0].text
341 |         const servicesJson = JSON.parse(
342 |           servicesText.substring(
343 |             servicesText.indexOf('['),
344 |             servicesText.lastIndexOf(']') + 1,
345 |           ),
346 |         )
347 |         expect(servicesJson).toHaveLength(1)
348 |       })()
349 |
350 |       server.close()
351 |     })
352 |
353 |     it('should handle empty response data', async () => {
354 |       const mockHandler = http.post(logsEndpoint, async () => {
355 |         return HttpResponse.json({
356 |           data: [],
357 |           meta: {
358 |             page: {},
359 |           },
360 |         })
361 |       })
362 |
363 |       const server = setupServer(mockHandler)
364 |
365 |       await server.boundary(async () => {
366 |         const request = createMockToolRequest('get_all_services', {
367 |           query: 'service:non-existent',
368 |           from: 1640995100,
369 |           to: 1640995200,
370 |           limit: 100,
371 |         })
372 |         const response = (await toolHandlers.get_all_services(
373 |           request,
374 |         )) as unknown as DatadogToolResponse
375 |
376 |         expect(response.content[0].text).toContain('Services')
377 |         expect(response.content[0].text).toContain('[]') // Empty array of services
378 |       })()
379 |
380 |       server.close()
381 |     })
382 |   })
383 | })
384 |
--------------------------------------------------------------------------------
/tests/tools/metrics.test.ts:
--------------------------------------------------------------------------------
1 | import { v1 } from '@datadog/datadog-api-client'
2 | import { describe, it, expect } from 'vitest'
3 | import { createDatadogConfig } from '../../src/utils/datadog'
4 | import { createMetricsToolHandlers } from '../../src/tools/metrics/tool'
5 | import { createMockToolRequest } from '../helpers/mock'
6 | import { http, HttpResponse } from 'msw'
7 | import { setupServer } from '../helpers/msw'
8 | import { baseUrl, DatadogToolResponse } from '../helpers/datadog'
9 |
10 | const metricsEndpoint = `${baseUrl}/v1/query`
11 |
12 | describe('Metrics Tool', () => { // Exercises the metrics tool handlers against msw-mocked Datadog endpoints; no real API traffic
13 |   if (!process.env.DATADOG_API_KEY || !process.env.DATADOG_APP_KEY) { // keys must merely be present — their values are never validated by the mocks
14 |     throw new Error('DATADOG_API_KEY and DATADOG_APP_KEY must be set')
15 |   }
16 |
17 |   const datadogConfig = createDatadogConfig({
18 |     apiKeyAuth: process.env.DATADOG_API_KEY,
19 |     appKeyAuth: process.env.DATADOG_APP_KEY,
20 |     site: process.env.DATADOG_SITE,
21 |   })
22 |
23 |   const apiInstance = new v1.MetricsApi(datadogConfig)
24 |   const toolHandlers = createMetricsToolHandlers(apiInstance)
25 |
26 |   // https://docs.datadoghq.com/api/latest/metrics/#query-timeseries-data-across-multiple-products
27 |   describe.concurrent('query_metrics', async () => { // NOTE(review): 'async' on a describe factory looks unnecessary — vitest collects suites synchronously; confirm before removing
28 |     it('should query metrics data', async () => {
29 |       const mockHandler = http.get(metricsEndpoint, async () => {
30 |         return HttpResponse.json({
31 |           status: 'ok',
32 |           query: 'avg:system.cpu.user{*}',
33 |           series: [
34 |             {
35 |               metric: 'system.cpu.user',
36 |               display_name: 'system.cpu.user',
37 |               pointlist: [
38 |                 [1640995000000, 23.45],
39 |                 [1640995060000, 24.12],
40 |                 [1640995120000, 22.89],
41 |                 [1640995180000, 25.67],
42 |               ],
43 |               scope: 'host:web-01',
44 |               expression: 'avg:system.cpu.user{*}',
45 |               unit: [
46 |                 {
47 |                   family: 'percentage',
48 |                   scale_factor: 1,
49 |                   name: 'percent',
50 |                   short_name: '%',
51 |                 },
52 |               ],
53 |             },
54 |             {
55 |               metric: 'system.cpu.user',
56 |               display_name: 'system.cpu.user',
57 |               pointlist: [
58 |                 [1640995000000, 18.32],
59 |                 [1640995060000, 19.01],
60 |                 [1640995120000, 17.76],
61 |                 [1640995180000, 20.45],
62 |               ],
63 |               scope: 'host:web-02',
64 |               expression: 'avg:system.cpu.user{*}',
65 |               unit: [
66 |                 {
67 |                   family: 'percentage',
68 |                   scale_factor: 1,
69 |                   name: 'percent',
70 |                   short_name: '%',
71 |                 },
72 |               ],
73 |             },
74 |           ],
75 |           from_date: 1640995000000,
76 |           to_date: 1641095000000,
77 |           group_by: ['host'],
78 |         })
79 |       })
80 |
81 |       const server = setupServer(mockHandler)
82 |
83 |       await server.boundary(async () => { // boundary() scopes the msw handlers to this concurrent test
84 |         const request = createMockToolRequest('query_metrics', {
85 |           from: 1640995000,
86 |           to: 1641095000,
87 |           query: 'avg:system.cpu.user{*}',
88 |         })
89 |         const response = (await toolHandlers.query_metrics(
90 |           request,
91 |         )) as unknown as DatadogToolResponse
92 |
93 |         expect(response.content[0].text).toContain('Queried metrics data:')
94 |         expect(response.content[0].text).toContain('system.cpu.user')
95 |         expect(response.content[0].text).toContain('host:web-01')
96 |         expect(response.content[0].text).toContain('host:web-02')
97 |         expect(response.content[0].text).toContain('23.45')
98 |       })()
99 |
100 |       server.close() // NOTE(review): skipped if an assertion above throws — consider try/finally
101 |     })
102 |
103 |     it('should handle empty response', async () => {
104 |       const mockHandler = http.get(metricsEndpoint, async () => {
105 |         return HttpResponse.json({
106 |           status: 'ok',
107 |           query: 'avg:non.existent.metric{*}',
108 |           series: [],
109 |           from_date: 1640995000000,
110 |           to_date: 1641095000000,
111 |         })
112 |       })
113 |
114 |       const server = setupServer(mockHandler)
115 |
116 |       await server.boundary(async () => {
117 |         const request = createMockToolRequest('query_metrics', {
118 |           from: 1640995000,
119 |           to: 1641095000,
120 |           query: 'avg:non.existent.metric{*}',
121 |         })
122 |         const response = (await toolHandlers.query_metrics(
123 |           request,
124 |         )) as unknown as DatadogToolResponse
125 |
126 |         expect(response.content[0].text).toContain('Queried metrics data:')
127 |         expect(response.content[0].text).toContain('series":[]')
128 |       })()
129 |
130 |       server.close()
131 |     })
132 |
133 |     it('should handle failed query status', async () => { // query failure arrives as HTTP 200 with status:'error' in the body, so the handler resolves rather than throws
134 |       const mockHandler = http.get(metricsEndpoint, async () => {
135 |         return HttpResponse.json({
136 |           status: 'error',
137 |           message: 'Invalid query format',
138 |           query: 'invalid:query:format',
139 |         })
140 |       })
141 |
142 |       const server = setupServer(mockHandler)
143 |
144 |       await server.boundary(async () => {
145 |         const request = createMockToolRequest('query_metrics', {
146 |           from: 1640995000,
147 |           to: 1641095000,
148 |           query: 'invalid:query:format',
149 |         })
150 |         const response = (await toolHandlers.query_metrics(
151 |           request,
152 |         )) as unknown as DatadogToolResponse
153 |
154 |         expect(response.content[0].text).toContain('status":"error"')
155 |         expect(response.content[0].text).toContain('Invalid query format')
156 |       })()
157 |
158 |       server.close()
159 |     })
160 |
161 |     it('should handle authentication errors', async () => {
162 |       const mockHandler = http.get(metricsEndpoint, async () => {
163 |         return HttpResponse.json(
164 |           { errors: ['Authentication failed'] },
165 |           { status: 403 },
166 |         )
167 |       })
168 |
169 |       const server = setupServer(mockHandler)
170 |
171 |       await server.boundary(async () => {
172 |         const request = createMockToolRequest('query_metrics', {
173 |           from: 1640995000,
174 |           to: 1641095000,
175 |           query: 'avg:system.cpu.user{*}',
176 |         })
177 |         await expect(toolHandlers.query_metrics(request)).rejects.toThrow()
178 |       })()
179 |
180 |       server.close()
181 |     })
182 |
183 |     it('should handle rate limit errors', async () => {
184 |       const mockHandler = http.get(metricsEndpoint, async () => {
185 |         return HttpResponse.json(
186 |           { errors: ['Rate limit exceeded'] },
187 |           { status: 429 },
188 |         )
189 |       })
190 |
191 |       const server = setupServer(mockHandler)
192 |
193 |       await server.boundary(async () => {
194 |         const request = createMockToolRequest('query_metrics', {
195 |           from: 1640995000,
196 |           to: 1641095000,
197 |           query: 'avg:system.cpu.user{*}',
198 |         })
199 |         await expect(toolHandlers.query_metrics(request)).rejects.toThrow(
200 |           'Rate limit exceeded',
201 |         )
202 |       })()
203 |
204 |       server.close()
205 |     })
206 |
207 |     it('should handle invalid time range errors', async () => {
208 |       const mockHandler = http.get(metricsEndpoint, async () => {
209 |         return HttpResponse.json(
210 |           { errors: ['Time range exceeds allowed limit'] },
211 |           { status: 400 },
212 |         )
213 |       })
214 |
215 |       const server = setupServer(mockHandler)
216 |
217 |       await server.boundary(async () => {
218 |         // Using a very large time range that might exceed limits
219 |         const request = createMockToolRequest('query_metrics', {
220 |           from: 1600000000, // Very old date
221 |           to: 1700000000, // Very recent date
222 |           query: 'avg:system.cpu.user{*}',
223 |         })
224 |         await expect(toolHandlers.query_metrics(request)).rejects.toThrow(
225 |           'Time range exceeds allowed limit',
226 |         )
227 |       })()
228 |
229 |       server.close()
230 |     })
231 |   })
232 | })
233 |
--------------------------------------------------------------------------------
/tests/tools/monitors.test.ts:
--------------------------------------------------------------------------------
1 | import { v1 } from '@datadog/datadog-api-client'
2 | import { describe, it, expect } from 'vitest'
3 | import { createDatadogConfig } from '../../src/utils/datadog'
4 | import { createMonitorsToolHandlers } from '../../src/tools/monitors/tool'
5 | import { createMockToolRequest } from '../helpers/mock'
6 | import { http, HttpResponse } from 'msw'
7 | import { setupServer } from '../helpers/msw'
8 | import { baseUrl, DatadogToolResponse } from '../helpers/datadog'
9 |
10 | const monitorsEndpoint = `${baseUrl}/v1/monitor`
11 |
12 | describe('Monitors Tool', () => {
13 | if (!process.env.DATADOG_API_KEY || !process.env.DATADOG_APP_KEY) {
14 | throw new Error('DATADOG_API_KEY and DATADOG_APP_KEY must be set')
15 | }
16 |
17 | const datadogConfig = createDatadogConfig({
18 | apiKeyAuth: process.env.DATADOG_API_KEY,
19 | appKeyAuth: process.env.DATADOG_APP_KEY,
20 | site: process.env.DATADOG_SITE,
21 | })
22 |
23 | const apiInstance = new v1.MonitorsApi(datadogConfig)
24 | const toolHandlers = createMonitorsToolHandlers(apiInstance)
25 |
26 | // https://docs.datadoghq.com/api/latest/monitors/#get-all-monitor-details
27 | describe.concurrent('get_monitors', async () => {
28 | it('should list monitors', async () => {
29 | const mockHandler = http.get(monitorsEndpoint, async () => {
30 | return HttpResponse.json([
31 | {
32 | id: 12345,
33 | name: 'Test API Monitor',
34 | type: 'metric alert',
35 | message: 'CPU usage is too high',
36 | tags: ['env:test', 'service:api'],
37 | query: 'avg(last_5m):avg:system.cpu.user{*} > 80',
38 | overall_state: 'Alert',
39 | created: '2023-01-01T00:00:00.000Z',
40 | modified: '2023-01-02T00:00:00.000Z',
41 | },
42 | {
43 | id: 67890,
44 | name: 'Test Web Monitor',
45 | type: 'service check',
46 | message: 'Web service is down',
47 | tags: ['env:test', 'service:web'],
48 | query: 'avg(last_5m):avg:system.cpu.user{*} > 80',
49 | overall_state: 'OK',
50 | created: '2023-02-01T00:00:00.000Z',
51 | modified: '2023-02-02T00:00:00.000Z',
52 | },
53 | ])
54 | })
55 |
56 | const server = setupServer(mockHandler)
57 |
58 | await server.boundary(async () => {
59 | const request = createMockToolRequest('get_monitors', {
60 | name: 'test-monitor',
61 | groupStates: ['alert', 'warn'],
62 | tags: ['env:test', 'service:api'],
63 | })
64 | const response = (await toolHandlers.get_monitors(
65 | request,
66 | )) as unknown as DatadogToolResponse
67 |
68 | // Check that monitors data is included
69 | expect(response.content[0].text).toContain('Monitors:')
70 | expect(response.content[0].text).toContain('Test API Monitor')
71 | expect(response.content[0].text).toContain('Test Web Monitor')
72 |
73 | // Check that summary is included
74 | expect(response.content[1].text).toContain('Summary of monitors:')
75 | expect(response.content[1].text).toContain('"alert":1')
76 | expect(response.content[1].text).toContain('"ok":1')
77 | })()
78 |
79 | server.close()
80 | })
81 |
82 | it('should handle monitors with various states', async () => {
83 | const mockHandler = http.get(monitorsEndpoint, async () => {
84 | return HttpResponse.json([
85 | {
86 | id: 1,
87 | name: 'Alert Monitor',
88 | overall_state: 'Alert',
89 | tags: ['env:test'],
90 | query: 'avg(last_5m):avg:system.cpu.user{*} > 80',
91 | type: 'metric alert',
92 | },
93 | {
94 | id: 2,
95 | name: 'Warn Monitor',
96 | overall_state: 'Warn',
97 | tags: ['env:test'],
98 | query: 'avg(last_5m):avg:system.cpu.user{*} > 80',
99 | type: 'metric alert',
100 | },
101 | {
102 | id: 3,
103 | name: 'No Data Monitor',
104 | overall_state: 'No Data',
105 | tags: ['env:test'],
106 | query: 'avg(last_5m):avg:system.cpu.user{*} > 80',
107 | type: 'metric alert',
108 | },
109 | {
110 | id: 4,
111 | name: 'OK Monitor',
112 | overall_state: 'OK',
113 | tags: ['env:test'],
114 | query: 'avg(last_5m):avg:system.cpu.user{*} > 80',
115 | type: 'metric alert',
116 | },
117 | {
118 | id: 5,
119 | name: 'Ignored Monitor',
120 | overall_state: 'Ignored',
121 | tags: ['env:test'],
122 | query: 'avg(last_5m):avg:system.cpu.user{*} > 80',
123 | type: 'metric alert',
124 | },
125 | {
126 | id: 6,
127 | name: 'Skipped Monitor',
128 | overall_state: 'Skipped',
129 | tags: ['env:test'],
130 | query: 'avg(last_5m):avg:system.cpu.user{*} > 80',
131 | type: 'metric alert',
132 | },
133 | {
134 | id: 7,
135 | name: 'Unknown Monitor',
136 | overall_state: 'Unknown',
137 | tags: ['env:test'],
138 | query: 'avg(last_5m):avg:system.cpu.user{*} > 80',
139 | type: 'metric alert',
140 | },
141 | ])
142 | })
143 |
144 | const server = setupServer(mockHandler)
145 |
146 | await server.boundary(async () => {
147 | const request = createMockToolRequest('get_monitors', {
148 | tags: ['env:test'],
149 | })
150 | const response = (await toolHandlers.get_monitors(
151 | request,
152 | )) as unknown as DatadogToolResponse
153 |
154 | // Check summary data has counts for all states
155 | expect(response.content[1].text).toContain('"alert":1')
156 | expect(response.content[1].text).toContain('"warn":1')
157 | expect(response.content[1].text).toContain('"noData":1')
158 | expect(response.content[1].text).toContain('"ok":1')
159 | expect(response.content[1].text).toContain('"ignored":1')
160 | expect(response.content[1].text).toContain('"skipped":1')
161 | expect(response.content[1].text).toContain('"unknown":1')
162 | })()
163 |
164 | server.close()
165 | })
166 |
167 | it('should handle empty response', async () => {
168 | const mockHandler = http.get(monitorsEndpoint, async () => {
169 | return HttpResponse.json([])
170 | })
171 |
172 | const server = setupServer(mockHandler)
173 |
174 | await server.boundary(async () => {
175 | const request = createMockToolRequest('get_monitors', {
176 | name: 'non-existent-monitor',
177 | })
178 | const response = (await toolHandlers.get_monitors(
179 | request,
180 | )) as unknown as DatadogToolResponse
181 |
182 | // Check that response contains empty array
183 | expect(response.content[0].text).toContain('Monitors: []')
184 |
185 | // Check that summary shows all zeros
186 | expect(response.content[1].text).toContain('"alert":0')
187 | expect(response.content[1].text).toContain('"warn":0')
188 | expect(response.content[1].text).toContain('"noData":0')
189 | expect(response.content[1].text).toContain('"ok":0')
190 | })()
191 |
192 | server.close()
193 | })
194 |
195 | it('should handle null response', async () => {
196 | const mockHandler = http.get(monitorsEndpoint, async () => {
197 | return HttpResponse.json(null)
198 | })
199 |
200 | const server = setupServer(mockHandler)
201 |
202 | await server.boundary(async () => {
203 | const request = createMockToolRequest('get_monitors', {})
204 | await expect(toolHandlers.get_monitors(request)).rejects.toThrow(
205 | 'No monitors data returned',
206 | )
207 | })()
208 |
209 | server.close()
210 | })
211 |
212 | it('should handle authentication errors', async () => {
213 | const mockHandler = http.get(monitorsEndpoint, async () => {
214 | return HttpResponse.json(
215 | { errors: ['Authentication failed'] },
216 | { status: 403 },
217 | )
218 | })
219 |
220 | const server = setupServer(mockHandler)
221 |
222 | await server.boundary(async () => {
223 | const request = createMockToolRequest('get_monitors', {})
224 | await expect(toolHandlers.get_monitors(request)).rejects.toThrow()
225 | })()
226 |
227 | server.close()
228 | })
229 |
230 | it('should handle rate limit errors', async () => {
231 | const mockHandler = http.get(monitorsEndpoint, async () => {
232 | return HttpResponse.json(
233 | { errors: ['Rate limit exceeded'] },
234 | { status: 429 },
235 | )
236 | })
237 |
238 | const server = setupServer(mockHandler)
239 |
240 | await server.boundary(async () => {
241 | const request = createMockToolRequest('get_monitors', {})
242 | await expect(toolHandlers.get_monitors(request)).rejects.toThrow(
243 | 'Rate limit exceeded',
244 | )
245 | })()
246 |
247 | server.close()
248 | })
249 |
250 | it('should handle server errors', async () => {
251 | const mockHandler = http.get(monitorsEndpoint, async () => {
252 | return HttpResponse.json(
253 | { errors: ['Internal server error'] },
254 | { status: 500 },
255 | )
256 | })
257 |
258 | const server = setupServer(mockHandler)
259 |
260 | await server.boundary(async () => {
261 | const request = createMockToolRequest('get_monitors', {})
262 | await expect(toolHandlers.get_monitors(request)).rejects.toThrow(
263 | 'Internal server error',
264 | )
265 | })()
266 |
267 | server.close()
268 | })
269 | })
270 | })
271 |
--------------------------------------------------------------------------------
/tests/tools/rum.test.ts:
--------------------------------------------------------------------------------
1 | import { v2 } from '@datadog/datadog-api-client'
2 | import { describe, it, expect } from 'vitest'
3 | import { createDatadogConfig } from '../../src/utils/datadog'
4 | import { createRumToolHandlers } from '../../src/tools/rum/tool'
5 | import { createMockToolRequest } from '../helpers/mock'
6 | import { http, HttpResponse } from 'msw'
7 | import { setupServer } from '../helpers/msw'
8 | import { baseUrl, DatadogToolResponse } from '../helpers/datadog'
9 |
10 | const getCommonServer = () => {
11 | const server = setupServer(
12 | http.get(`${baseUrl}/v2/rum/events`, async () => {
13 | return HttpResponse.json({
14 | data: [
15 | {
16 | id: 'event1',
17 | attributes: {
18 | attributes: {
19 | application: {
20 | name: 'Application 1',
21 | },
22 | session: { id: 'sess1' },
23 | view: {
24 | load_time: 123,
25 | first_contentful_paint: 456,
26 | },
27 | },
28 | },
29 | },
30 | {
31 | id: 'event2',
32 | attributes: {
33 | attributes: {
34 | application: {
35 | name: 'Application 1',
36 | },
37 | session: { id: 'sess2' },
38 | view: {
39 | load_time: 789,
40 | first_contentful_paint: 101,
41 | },
42 | },
43 | },
44 | },
45 | {
46 | id: 'event3',
47 | attributes: {
48 | attributes: {
49 | application: {
50 | name: 'Application 2',
51 | },
52 | session: { id: 'sess3' },
53 | view: {
54 | load_time: 234,
55 | first_contentful_paint: 567,
56 | },
57 | },
58 | },
59 | },
60 | ],
61 | })
62 | }),
63 | )
64 | return server
65 | }
66 |
67 | describe('RUM Tools', () => {
68 | if (!process.env.DATADOG_API_KEY || !process.env.DATADOG_APP_KEY) {
69 | throw new Error('DATADOG_API_KEY and DATADOG_APP_KEY must be set')
70 | }
71 |
72 | const datadogConfig = createDatadogConfig({
73 | apiKeyAuth: process.env.DATADOG_API_KEY,
74 | appKeyAuth: process.env.DATADOG_APP_KEY,
75 | site: process.env.DATADOG_SITE,
76 | })
77 |
78 | const apiInstance = new v2.RUMApi(datadogConfig)
79 | const toolHandlers = createRumToolHandlers(apiInstance)
80 |
81 | describe.concurrent('get_rum_applications', async () => {
82 | it('should retrieve RUM applications', async () => {
83 | const server = setupServer(
84 | http.get(`${baseUrl}/v2/rum/applications`, async () => {
85 | return HttpResponse.json({
86 | data: [
87 | {
88 | attributes: {
89 | application_id: '7124cba6-8ffe-4122-a644-82c7f4c21ae0',
90 | name: 'Application 1',
91 | created_at: 1725949945579,
92 | created_by_handle: 'rex@rexskz.info',
93 | org_id: 1,
94 | type: 'browser',
95 | updated_at: 1725949945579,
96 | updated_by_handle: 'Datadog',
97 | },
98 | id: '7124cba6-8ffe-4122-a644-82c7f4c21ae0',
99 | type: 'rum_application',
100 | },
101 | ],
102 | })
103 | }),
104 | )
105 | await server.boundary(async () => {
106 | const request = createMockToolRequest('get_rum_applications', {})
107 | const response = (await toolHandlers.get_rum_applications(
108 | request,
109 | )) as unknown as DatadogToolResponse
110 |
111 | expect(response.content[0].text).toContain('RUM applications')
112 | expect(response.content[0].text).toContain('Application 1')
113 | expect(response.content[0].text).toContain('rum_application')
114 | })()
115 |
116 | server.close()
117 | })
118 | })
119 |
120 | describe.concurrent('get_rum_events', async () => {
121 | it('should retrieve RUM events', async () => {
122 | const server = getCommonServer()
123 | await server.boundary(async () => {
124 | const request = createMockToolRequest('get_rum_events', {
125 | query: '*',
126 | from: 1640995100,
127 | to: 1640995200,
128 | limit: 10,
129 | })
130 | const response = (await toolHandlers.get_rum_events(
131 | request,
132 | )) as unknown as DatadogToolResponse
133 |
134 | expect(response.content[0].text).toContain('RUM events data')
135 | expect(response.content[0].text).toContain('event1')
136 | expect(response.content[0].text).toContain('event2')
137 | expect(response.content[0].text).toContain('event3')
138 | })()
139 |
140 | server.close()
141 | })
142 | })
143 |
144 | describe.concurrent('get_rum_grouped_event_count', async () => {
145 | it('should retrieve grouped event counts by application name', async () => {
146 | const server = getCommonServer()
147 | await server.boundary(async () => {
148 | const request = createMockToolRequest('get_rum_grouped_event_count', {
149 | query: '*',
150 | from: 1640995100,
151 | to: 1640995200,
152 | groupBy: 'application.name',
153 | })
154 | const response = (await toolHandlers.get_rum_grouped_event_count(
155 | request,
156 | )) as unknown as DatadogToolResponse
157 |
158 | expect(response.content[0].text).toContain(
159 | 'Session counts (grouped by application.name): {"Application 1":2,"Application 2":1}',
160 | )
161 | })()
162 |
163 | server.close()
164 | })
165 |
166 | it('should handle custom query filter', async () => {
167 | const server = getCommonServer()
168 | await server.boundary(async () => {
169 | const request = createMockToolRequest('get_rum_grouped_event_count', {
170 | query: '@application.name:Application 1',
171 | from: 1640995100,
172 | to: 1640995200,
173 | groupBy: 'application.name',
174 | })
175 | const response = (await toolHandlers.get_rum_grouped_event_count(
176 | request,
177 | )) as unknown as DatadogToolResponse
178 |
179 | expect(response.content[0].text).toContain(
180 | 'Session counts (grouped by application.name):',
181 | )
182 | expect(response.content[0].text).toContain('"Application 1":2')
183 | })()
184 |
185 | server.close()
186 | })
187 |
188 | it('should handle deeper nested path for groupBy', async () => {
189 | const server = getCommonServer()
190 | await server.boundary(async () => {
191 | const request = createMockToolRequest('get_rum_grouped_event_count', {
192 | query: '*',
193 | from: 1640995100,
194 | to: 1640995200,
195 | groupBy: 'view.load_time',
196 | })
197 | const response = (await toolHandlers.get_rum_grouped_event_count(
198 | request,
199 | )) as unknown as DatadogToolResponse
200 |
201 | expect(response.content[0].text).toContain(
202 | 'Session counts (grouped by view.load_time):',
203 | )
204 | expect(response.content[0].text).toContain('"123":1')
205 | expect(response.content[0].text).toContain('"789":1')
206 | expect(response.content[0].text).toContain('"234":1')
207 | })()
208 |
209 | server.close()
210 | })
211 |
212 | it('should handle invalid groupBy path gracefully', async () => {
213 | const server = getCommonServer()
214 | await server.boundary(async () => {
215 | const request = createMockToolRequest('get_rum_grouped_event_count', {
216 | query: '*',
217 | from: 1640995100,
218 | to: 1640995200,
219 | groupBy: 'nonexistent.path',
220 | })
221 | const response = (await toolHandlers.get_rum_grouped_event_count(
222 | request,
223 | )) as unknown as DatadogToolResponse
224 |
225 | expect(response.content[0].text).toContain(
226 | 'Session counts (grouped by nonexistent.path): {"unknown":3}',
227 | )
228 | })()
229 |
230 | server.close()
231 | })
232 |
233 | it('should handle empty data response', async () => {
234 | const server = setupServer(
235 | http.get(`${baseUrl}/v2/rum/events`, async () => {
236 | return HttpResponse.json({
237 | data: [],
238 | })
239 | }),
240 | )
241 | await server.boundary(async () => {
242 | const request = createMockToolRequest('get_rum_grouped_event_count', {
243 | query: '*',
244 | from: 1640995100,
245 | to: 1640995200,
246 | groupBy: 'application.name',
247 | })
248 | const response = (await toolHandlers.get_rum_grouped_event_count(
249 | request,
250 | )) as unknown as DatadogToolResponse
251 |
252 | expect(response.content[0].text).toContain(
253 | 'Session counts (grouped by application.name): {}',
254 | )
255 | })()
256 |
257 | server.close()
258 | })
259 |
260 | it('should handle null data response', async () => {
261 | const server = setupServer(
262 | http.get(`${baseUrl}/v2/rum/events`, async () => {
263 | return HttpResponse.json({
264 | data: null,
265 | })
266 | }),
267 | )
268 | await server.boundary(async () => {
269 | const request = createMockToolRequest('get_rum_grouped_event_count', {
270 | query: '*',
271 | from: 1640995100,
272 | to: 1640995200,
273 | groupBy: 'application.name',
274 | })
275 | await expect(
276 | toolHandlers.get_rum_grouped_event_count(request),
277 | ).rejects.toThrow('No RUM events data returned')
278 | })()
279 |
280 | server.close()
281 | })
282 |
283 | it('should handle events without attributes field', async () => {
284 | const server = setupServer(
285 | http.get(`${baseUrl}/v2/rum/events`, async () => {
286 | return HttpResponse.json({
287 | data: [
288 | {
289 | id: 'event1',
290 | // Missing attributes field
291 | },
292 | {
293 | id: 'event2',
294 | attributes: {
295 | // Missing attributes.attributes field
296 | },
297 | },
298 | {
299 | id: 'event3',
300 | attributes: {
301 | attributes: {
302 | application: {
303 | name: 'Application 3',
304 | },
305 | // Missing session field
306 | },
307 | },
308 | },
309 | ],
310 | })
311 | }),
312 | )
313 | await server.boundary(async () => {
314 | const request = createMockToolRequest('get_rum_grouped_event_count', {
315 | query: '*',
316 | from: 1640995100,
317 | to: 1640995200,
318 | groupBy: 'application.name',
319 | })
320 | const response = (await toolHandlers.get_rum_grouped_event_count(
321 | request,
322 | )) as unknown as DatadogToolResponse
323 |
324 | expect(response.content[0].text).toContain(
325 | 'Session counts (grouped by application.name): {"Application 3":0}',
326 | )
327 | })()
328 |
329 | server.close()
330 | })
331 | })
332 |
333 | describe.concurrent('get_rum_page_performance', async () => {
334 | it('should retrieve page performance metrics', async () => {
335 | const server = getCommonServer()
336 | await server.boundary(async () => {
337 | const request = createMockToolRequest('get_rum_page_performance', {
338 | query: '*',
339 | from: 1640995100,
340 | to: 1640995200,
341 | metricNames: ['view.load_time', 'view.first_contentful_paint'],
342 | })
343 | const response = (await toolHandlers.get_rum_page_performance(
344 | request,
345 | )) as unknown as DatadogToolResponse
346 |
347 | expect(response.content[0].text).toContain(
348 | 'Page performance metrics: {"view.load_time":{"avg":382,"min":123,"max":789,"count":3},"view.first_contentful_paint":{"avg":374.6666666666667,"min":101,"max":567,"count":3}}',
349 | )
350 | })()
351 |
352 | server.close()
353 | })
354 |
355 | it('should use default metric names if not provided', async () => {
356 | const server = getCommonServer()
357 | await server.boundary(async () => {
358 | const request = createMockToolRequest('get_rum_page_performance', {
359 | query: '*',
360 | from: 1640995100,
361 | to: 1640995200,
362 | // metricNames not provided, should use defaults
363 | })
364 | const response = (await toolHandlers.get_rum_page_performance(
365 | request,
366 | )) as unknown as DatadogToolResponse
367 |
368 | expect(response.content[0].text).toContain('Page performance metrics')
369 | expect(response.content[0].text).toContain('view.load_time')
370 | expect(response.content[0].text).toContain(
371 | 'view.first_contentful_paint',
372 | )
373 | // Default also includes largest_contentful_paint, but our mock doesn't have this data
374 | expect(response.content[0].text).toContain(
375 | 'view.largest_contentful_paint',
376 | )
377 | })()
378 |
379 | server.close()
380 | })
381 |
382 | it('should handle custom query filter', async () => {
383 | const server = getCommonServer()
384 | await server.boundary(async () => {
385 | const request = createMockToolRequest('get_rum_page_performance', {
386 | query: '@application.name:Application 1',
387 | from: 1640995100,
388 | to: 1640995200,
389 | metricNames: ['view.load_time'],
390 | })
391 | const response = (await toolHandlers.get_rum_page_performance(
392 | request,
393 | )) as unknown as DatadogToolResponse
394 |
395 | expect(response.content[0].text).toContain('Page performance metrics')
396 | expect(response.content[0].text).toContain('view.load_time')
397 | })()
398 |
399 | server.close()
400 | })
401 |
402 | it('should handle empty data response', async () => {
403 | const server = setupServer(
404 | http.get(`${baseUrl}/v2/rum/events`, async () => {
405 | return HttpResponse.json({
406 | data: [],
407 | })
408 | }),
409 | )
410 | await server.boundary(async () => {
411 | const request = createMockToolRequest('get_rum_page_performance', {
412 | query: '*',
413 | from: 1640995100,
414 | to: 1640995200,
415 | metricNames: ['view.load_time', 'view.first_contentful_paint'],
416 | })
417 | const response = (await toolHandlers.get_rum_page_performance(
418 | request,
419 | )) as unknown as DatadogToolResponse
420 |
421 | expect(response.content[0].text).toContain('Page performance metrics')
422 | expect(response.content[0].text).toContain(
423 | '"view.load_time":{"avg":0,"min":0,"max":0,"count":0}',
424 | )
425 | expect(response.content[0].text).toContain(
426 | '"view.first_contentful_paint":{"avg":0,"min":0,"max":0,"count":0}',
427 | )
428 | })()
429 |
430 | server.close()
431 | })
432 |
433 | it('should handle null data response', async () => {
434 | const server = setupServer(
435 | http.get(`${baseUrl}/v2/rum/events`, async () => {
436 | return HttpResponse.json({
437 | data: null,
438 | })
439 | }),
440 | )
441 | await server.boundary(async () => {
442 | const request = createMockToolRequest('get_rum_page_performance', {
443 | query: '*',
444 | from: 1640995100,
445 | to: 1640995200,
446 | metricNames: ['view.load_time'],
447 | })
448 | await expect(
449 | toolHandlers.get_rum_page_performance(request),
450 | ).rejects.toThrow('No RUM events data returned')
451 | })()
452 |
453 | server.close()
454 | })
455 |
456 | it('should handle events without attributes field', async () => {
457 | const server = setupServer(
458 | http.get(`${baseUrl}/v2/rum/events`, async () => {
459 | return HttpResponse.json({
460 | data: [
461 | {
462 | id: 'event1',
463 | // Missing attributes field
464 | },
465 | {
466 | id: 'event2',
467 | attributes: {
468 | // Missing attributes.attributes field
469 | },
470 | },
471 | {
472 | id: 'event3',
473 | attributes: {
474 | attributes: {
475 | application: {
476 | name: 'Application 3',
477 | },
478 | // Missing view field with metrics
479 | },
480 | },
481 | },
482 | ],
483 | })
484 | }),
485 | )
486 | await server.boundary(async () => {
487 | const request = createMockToolRequest('get_rum_page_performance', {
488 | query: '*',
489 | from: 1640995100,
490 | to: 1640995200,
491 | metricNames: ['view.load_time', 'view.first_contentful_paint'],
492 | })
493 | const response = (await toolHandlers.get_rum_page_performance(
494 | request,
495 | )) as unknown as DatadogToolResponse
496 |
497 | expect(response.content[0].text).toContain('Page performance metrics')
498 | expect(response.content[0].text).toContain(
499 | '"view.load_time":{"avg":0,"min":0,"max":0,"count":0}',
500 | )
501 | expect(response.content[0].text).toContain(
502 | '"view.first_contentful_paint":{"avg":0,"min":0,"max":0,"count":0}',
503 | )
504 | })()
505 |
506 | server.close()
507 | })
508 |
509 | it('should handle deeply nested metric paths', async () => {
510 | const server = setupServer(
511 | http.get(`${baseUrl}/v2/rum/events`, async () => {
512 | return HttpResponse.json({
513 | data: [
514 | {
515 | id: 'event1',
516 | attributes: {
517 | attributes: {
518 | application: {
519 | name: 'Application 1',
520 | },
521 | deep: {
522 | nested: {
523 | metric: 42,
524 | },
525 | },
526 | },
527 | },
528 | },
529 | {
530 | id: 'event2',
531 | attributes: {
532 | attributes: {
533 | application: {
534 | name: 'Application 2',
535 | },
536 | deep: {
537 | nested: {
538 | metric: 84,
539 | },
540 | },
541 | },
542 | },
543 | },
544 | ],
545 | })
546 | }),
547 | )
548 | await server.boundary(async () => {
549 | const request = createMockToolRequest('get_rum_page_performance', {
550 | query: '*',
551 | from: 1640995100,
552 | to: 1640995200,
553 | metricNames: ['deep.nested.metric'],
554 | })
555 | const response = (await toolHandlers.get_rum_page_performance(
556 | request,
557 | )) as unknown as DatadogToolResponse
558 |
559 | expect(response.content[0].text).toContain('Page performance metrics')
560 | expect(response.content[0].text).toContain(
561 | '"deep.nested.metric":{"avg":63,"min":42,"max":84,"count":2}',
562 | )
563 | })()
564 |
565 | server.close()
566 | })
567 |
568 | it('should handle mixed metric availability', async () => {
569 | const server = setupServer(
570 | http.get(`${baseUrl}/v2/rum/events`, async () => {
571 | return HttpResponse.json({
572 | data: [
573 | {
574 | id: 'event1',
575 | attributes: {
576 | attributes: {
577 | view: {
578 | load_time: 100,
579 | // first_contentful_paint is missing
580 | },
581 | },
582 | },
583 | },
584 | {
585 | id: 'event2',
586 | attributes: {
587 | attributes: {
588 | view: {
589 | // load_time is missing
590 | first_contentful_paint: 200,
591 | },
592 | },
593 | },
594 | },
595 | ],
596 | })
597 | }),
598 | )
599 | await server.boundary(async () => {
600 | const request = createMockToolRequest('get_rum_page_performance', {
601 | query: '*',
602 | from: 1640995100,
603 | to: 1640995200,
604 | metricNames: ['view.load_time', 'view.first_contentful_paint'],
605 | })
606 | const response = (await toolHandlers.get_rum_page_performance(
607 | request,
608 | )) as unknown as DatadogToolResponse
609 |
610 | expect(response.content[0].text).toContain('Page performance metrics')
611 | expect(response.content[0].text).toContain(
612 | '"view.load_time":{"avg":100,"min":100,"max":100,"count":1}',
613 | )
614 | expect(response.content[0].text).toContain(
615 | '"view.first_contentful_paint":{"avg":200,"min":200,"max":200,"count":1}',
616 | )
617 | })()
618 |
619 | server.close()
620 | })
621 |
622 | it('should handle non-numeric values gracefully', async () => {
623 | const server = setupServer(
624 | http.get(`${baseUrl}/v2/rum/events`, async () => {
625 | return HttpResponse.json({
626 | data: [
627 | {
628 | id: 'event1',
629 | attributes: {
630 | attributes: {
631 | invalid_metric: 'not-a-number',
632 | view: {
633 | load_time: 100,
634 | },
635 | },
636 | },
637 | },
638 | ],
639 | })
640 | }),
641 | )
642 | await server.boundary(async () => {
643 | const request = createMockToolRequest('get_rum_page_performance', {
644 | query: '*',
645 | from: 1640995100,
646 | to: 1640995200,
647 | metricNames: ['invalid_metric', 'view.load_time'],
648 | })
649 | const response = (await toolHandlers.get_rum_page_performance(
650 | request,
651 | )) as unknown as DatadogToolResponse
652 |
653 | expect(response.content[0].text).toContain('Page performance metrics')
654 | expect(response.content[0].text).toContain(
655 | '"invalid_metric":{"avg":0,"min":0,"max":0,"count":0}',
656 | )
657 | expect(response.content[0].text).toContain(
658 | '"view.load_time":{"avg":100,"min":100,"max":100,"count":1}',
659 | )
660 | })()
661 |
662 | server.close()
663 | })
664 | })
665 |
666 | describe.concurrent('get_rum_page_waterfall', async () => {
667 | it('should retrieve page waterfall data', async () => {
668 | const server = getCommonServer()
669 | await server.boundary(async () => {
670 | const request = createMockToolRequest('get_rum_page_waterfall', {
671 | applicationName: 'Application 1',
672 | sessionId: 'sess1',
673 | })
674 | const response = (await toolHandlers.get_rum_page_waterfall(
675 | request,
676 | )) as unknown as DatadogToolResponse
677 |
678 | expect(response.content[0].text).toContain('Waterfall data')
679 | expect(response.content[0].text).toContain('event1')
680 | expect(response.content[0].text).toContain('event2')
681 | })()
682 |
683 | server.close()
684 | })
685 | })
686 | })
687 |
--------------------------------------------------------------------------------
/tests/tools/traces.test.ts:
--------------------------------------------------------------------------------
1 | import { v2 } from '@datadog/datadog-api-client'
2 | import { describe, it, expect } from 'vitest'
3 | import { createDatadogConfig } from '../../src/utils/datadog'
4 | import { createTracesToolHandlers } from '../../src/tools/traces/tool'
5 | import { createMockToolRequest } from '../helpers/mock'
6 | import { http, HttpResponse } from 'msw'
7 | import { setupServer } from '../helpers/msw'
8 | import { baseUrl, DatadogToolResponse } from '../helpers/datadog'
9 |
10 | const tracesEndpoint = `${baseUrl}/v2/spans/events/search`
11 |
12 | describe('Traces Tool', () => {
13 | if (!process.env.DATADOG_API_KEY || !process.env.DATADOG_APP_KEY) {
14 | throw new Error('DATADOG_API_KEY and DATADOG_APP_KEY must be set')
15 | }
16 |
17 | const datadogConfig = createDatadogConfig({
18 | apiKeyAuth: process.env.DATADOG_API_KEY,
19 | appKeyAuth: process.env.DATADOG_APP_KEY,
20 | site: process.env.DATADOG_SITE,
21 | })
22 |
23 | const apiInstance = new v2.SpansApi(datadogConfig)
24 | const toolHandlers = createTracesToolHandlers(apiInstance)
25 |
26 | // https://docs.datadoghq.com/api/latest/spans/#search-spans
27 | describe.concurrent('list_traces', async () => {
28 | it('should list traces with basic query', async () => {
29 | const mockHandler = http.post(tracesEndpoint, async () => {
30 | return HttpResponse.json({
31 | data: [
32 | {
33 | id: 'span-id-1',
34 | type: 'spans',
35 | attributes: {
36 | service: 'web-api',
37 | name: 'http.request',
38 | resource: 'GET /api/users',
39 | trace_id: 'trace-id-1',
40 | span_id: 'span-id-1',
41 | parent_id: 'parent-id-1',
42 | start: 1640995100000000000,
43 | duration: 500000000,
44 | error: 1,
45 | meta: {
46 | 'http.method': 'GET',
47 | 'http.status_code': '500',
48 | 'error.type': 'Internal Server Error',
49 | },
50 | },
51 | },
52 | {
53 | id: 'span-id-2',
54 | type: 'spans',
55 | attributes: {
56 | service: 'web-api',
57 | name: 'http.request',
58 | resource: 'GET /api/products',
59 | trace_id: 'trace-id-2',
60 | span_id: 'span-id-2',
61 | parent_id: 'parent-id-2',
62 | start: 1640995000000000000,
63 | duration: 300000000,
64 | error: 1,
65 | meta: {
66 | 'http.method': 'GET',
67 | 'http.status_code': '500',
68 | 'error.type': 'Internal Server Error',
69 | },
70 | },
71 | },
72 | ],
73 | meta: {
74 | page: {
75 | after: 'cursor-value',
76 | },
77 | },
78 | })
79 | })
80 |
81 | const server = setupServer(mockHandler)
82 |
83 | await server.boundary(async () => {
84 | const request = createMockToolRequest('list_traces', {
85 | query: 'http.status_code:500',
86 | from: 1640995000,
87 | to: 1640996000,
88 | limit: 50,
89 | })
90 | const response = (await toolHandlers.list_traces(
91 | request,
92 | )) as unknown as DatadogToolResponse
93 |
94 | expect(response.content[0].text).toContain('Traces:')
95 | expect(response.content[0].text).toContain('web-api')
96 | expect(response.content[0].text).toContain('GET /api/users')
97 | expect(response.content[0].text).toContain('GET /api/products')
98 | expect(response.content[0].text).toContain('count":2')
99 | })()
100 |
101 | server.close()
102 | })
103 |
104 | it('should include service and operation filters', async () => {
105 | const mockHandler = http.post(tracesEndpoint, async () => {
106 | return HttpResponse.json({
107 | data: [
108 | {
109 | id: 'span-id-3',
110 | type: 'spans',
111 | attributes: {
112 | service: 'payment-service',
113 | name: 'process-payment',
114 | resource: 'process-payment',
115 | trace_id: 'trace-id-3',
116 | span_id: 'span-id-3',
117 | parent_id: 'parent-id-3',
118 | start: 1640995100000000000,
119 | duration: 800000000,
120 | error: 1,
121 | meta: {
122 | 'error.type': 'PaymentProcessingError',
123 | },
124 | },
125 | },
126 | ],
127 | meta: {
128 | page: {
129 | after: null,
130 | },
131 | },
132 | })
133 | })
134 |
135 | const server = setupServer(mockHandler)
136 |
137 | await server.boundary(async () => {
138 | const request = createMockToolRequest('list_traces', {
139 | query: 'error:true',
140 | from: 1640995000,
141 | to: 1640996000,
142 | service: 'payment-service',
143 | operation: 'process-payment',
144 | })
145 | const response = (await toolHandlers.list_traces(
146 | request,
147 | )) as unknown as DatadogToolResponse
148 |
149 | expect(response.content[0].text).toContain('payment-service')
150 | expect(response.content[0].text).toContain('process-payment')
151 | expect(response.content[0].text).toContain('PaymentProcessingError')
152 | })()
153 |
154 | server.close()
155 | })
156 |
157 | it('should handle ascending sort', async () => {
158 | const mockHandler = http.post(tracesEndpoint, async () => {
159 | return HttpResponse.json({
160 | data: [
161 | {
162 | id: 'span-id-oldest',
163 | type: 'spans',
164 | attributes: {
165 | service: 'api',
166 | name: 'http.request',
167 | start: 1640995000000000000,
168 | },
169 | },
170 | {
171 | id: 'span-id-newest',
172 | type: 'spans',
173 | attributes: {
174 | service: 'api',
175 | name: 'http.request',
176 | start: 1640995100000000000,
177 | },
178 | },
179 | ],
180 | })
181 | })
182 |
183 | const server = setupServer(mockHandler)
184 |
185 | await server.boundary(async () => {
186 | const request = createMockToolRequest('list_traces', {
187 | query: '',
188 | from: 1640995000,
189 | to: 1640996000,
190 | sort: 'timestamp', // ascending order
191 | })
192 | const response = (await toolHandlers.list_traces(
193 | request,
194 | )) as unknown as DatadogToolResponse
195 |
196 | expect(response.content[0].text).toContain('span-id-oldest')
197 | expect(response.content[0].text).toContain('span-id-newest')
198 | })()
199 |
200 | server.close()
201 | })
202 |
203 | it('should handle empty response', async () => {
204 | const mockHandler = http.post(tracesEndpoint, async () => {
205 | return HttpResponse.json({
206 | data: [],
207 | meta: {
208 | page: {},
209 | },
210 | })
211 | })
212 |
213 | const server = setupServer(mockHandler)
214 |
215 | await server.boundary(async () => {
216 | const request = createMockToolRequest('list_traces', {
217 | query: 'service:non-existent',
218 | from: 1640995000,
219 | to: 1640996000,
220 | })
221 | const response = (await toolHandlers.list_traces(
222 | request,
223 | )) as unknown as DatadogToolResponse
224 |
225 | expect(response.content[0].text).toContain('Traces:')
226 | expect(response.content[0].text).toContain('count":0')
227 | expect(response.content[0].text).toContain('traces":[]')
228 | })()
229 |
230 | server.close()
231 | })
232 |
233 | it('should handle null response data', async () => {
234 | const mockHandler = http.post(tracesEndpoint, async () => {
235 | return HttpResponse.json({
236 | data: null,
237 | meta: {
238 | page: {},
239 | },
240 | })
241 | })
242 |
243 | const server = setupServer(mockHandler)
244 |
245 | await server.boundary(async () => {
246 | const request = createMockToolRequest('list_traces', {
247 | query: '',
248 | from: 1640995000,
249 | to: 1640996000,
250 | })
251 | await expect(toolHandlers.list_traces(request)).rejects.toThrow(
252 | 'No traces data returned',
253 | )
254 | })()
255 |
256 | server.close()
257 | })
258 |
259 | it('should handle authentication errors', async () => {
260 | const mockHandler = http.post(tracesEndpoint, async () => {
261 | return HttpResponse.json(
262 | { errors: ['Authentication failed'] },
263 | { status: 403 },
264 | )
265 | })
266 |
267 | const server = setupServer(mockHandler)
268 |
269 | await server.boundary(async () => {
270 | const request = createMockToolRequest('list_traces', {
271 | query: '',
272 | from: 1640995000,
273 | to: 1640996000,
274 | })
275 | await expect(toolHandlers.list_traces(request)).rejects.toThrow()
276 | })()
277 |
278 | server.close()
279 | })
280 |
281 | it('should handle rate limit errors', async () => {
282 | const mockHandler = http.post(tracesEndpoint, async () => {
283 | return HttpResponse.json(
284 | { errors: ['Rate limit exceeded'] },
285 | { status: 429 },
286 | )
287 | })
288 |
289 | const server = setupServer(mockHandler)
290 |
291 | await server.boundary(async () => {
292 | const request = createMockToolRequest('list_traces', {
293 | query: '',
294 | from: 1640995000,
295 | to: 1640996000,
296 | })
297 | await expect(toolHandlers.list_traces(request)).rejects.toThrow(
298 | /errors./,
299 | )
300 | })()
301 |
302 | server.close()
303 | })
304 | })
305 | })
306 |
--------------------------------------------------------------------------------
/tests/utils/datadog.test.ts:
--------------------------------------------------------------------------------
1 | import { describe, it, expect } from 'vitest'
2 | import {
3 | ApiKeyAuthAuthentication,
4 | AppKeyAuthAuthentication,
5 | } from '@datadog/datadog-api-client/dist/packages/datadog-api-client-common'
6 | import { createDatadogConfig, getDatadogSite } from '../../src/utils/datadog'
7 |
8 | describe('createDatadogConfig', () => {
9 | it('should create a datadog config with custom site when DATADOG_SITE is configured', () => {
10 | const datadogConfig = createDatadogConfig({
11 | apiKeyAuth: 'test-api-key',
12 | appKeyAuth: 'test-app-key',
13 | site: 'us3.datadoghq.com',
14 | })
15 | expect(datadogConfig.authMethods).toEqual({
16 | apiKeyAuth: new ApiKeyAuthAuthentication('test-api-key'),
17 | appKeyAuth: new AppKeyAuthAuthentication('test-app-key'),
18 | })
19 | expect(datadogConfig.servers[0]?.getConfiguration()?.site).toBe(
20 | 'us3.datadoghq.com',
21 | )
22 | })
23 |
24 | it('should create a datadog config with default site when DATADOG_SITE is not configured', () => {
25 | const datadogConfig = createDatadogConfig({
26 | apiKeyAuth: 'test-api-key',
27 | appKeyAuth: 'test-app-key',
28 | })
29 | expect(datadogConfig.authMethods).toEqual({
30 | apiKeyAuth: new ApiKeyAuthAuthentication('test-api-key'),
31 | appKeyAuth: new AppKeyAuthAuthentication('test-app-key'),
32 | })
33 | expect(datadogConfig.servers[0]?.getConfiguration()?.site).toBe(
34 | 'datadoghq.com',
35 | )
36 | })
37 |
38 | it('should throw an error when DATADOG_API_KEY are not configured', () => {
39 | expect(() =>
40 | createDatadogConfig({
41 | apiKeyAuth: '',
42 | appKeyAuth: 'test-app-key',
43 | }),
44 | ).toThrow('Datadog API key and APP key are required')
45 | })
46 |
47 | it('should throw an error when DATADOG_APP_KEY are not configured', () => {
48 | expect(() =>
49 | createDatadogConfig({
50 | apiKeyAuth: 'test-api-key',
51 | appKeyAuth: '',
52 | }),
53 | ).toThrow('Datadog API key and APP key are required')
54 | })
55 | })
56 |
57 | describe('getDatadogSite', () => {
58 | it('should return custom site when DATADOG_SITE is configured', () => {
59 | const datadogConfig = createDatadogConfig({
60 | apiKeyAuth: 'test-api-key',
61 | appKeyAuth: 'test-app-key',
62 | site: 'us3.datadoghq.com',
63 | })
64 | const site = getDatadogSite(datadogConfig)
65 | expect(site).toBe('us3.datadoghq.com')
66 | })
67 |
68 | it('should return default site when DATADOG_SITE is not configured', () => {
69 | const datadogConfig = createDatadogConfig({
70 | apiKeyAuth: 'test-api-key',
71 | appKeyAuth: 'test-app-key',
72 | })
73 | const site = getDatadogSite(datadogConfig)
74 | expect(site).toBe('datadoghq.com')
75 | })
76 | })
77 |
--------------------------------------------------------------------------------
/tests/utils/tool.test.ts:
--------------------------------------------------------------------------------
1 | import { describe, it, expect } from 'vitest'
2 | import { Tool } from '@modelcontextprotocol/sdk/types.js'
3 | import { createToolSchema } from '../../src/utils/tool'
4 | import { z } from 'zod'
5 |
6 | describe('createToolSchema', () => {
7 | it('should generate tool schema with correct inputSchema when definitions exist', () => {
8 | // Create a dummy schema with a matching definition for the tool name
9 | const dummySchema = z.object({
10 | foo: z.string().describe('foo description'),
11 | bar: z.number().describe('bar description').optional(),
12 | baz: z.boolean().describe('baz description').default(false),
13 | qux: z.number().describe('qux description').min(10).max(20).default(15),
14 | })
15 |
16 | // Call createToolSchema with the dummy schema, tool name, and description
17 | const gotTool = createToolSchema(
18 | dummySchema,
19 | 'test',
20 | 'dummy test description',
21 | )
22 |
23 | // Expected inputSchema based on the dummy schema
24 | const expectedInputSchema: Tool = {
25 | name: 'test',
26 | description: 'dummy test description',
27 | inputSchema: {
28 | type: 'object',
29 | properties: {
30 | foo: {
31 | type: 'string',
32 | description: 'foo description',
33 | },
34 | bar: {
35 | type: 'number',
36 | description: 'bar description',
37 | },
38 | baz: {
39 | type: 'boolean',
40 | description: 'baz description',
41 | default: false,
42 | },
43 | qux: {
44 | type: 'number',
45 | description: 'qux description',
46 | default: 15,
47 | minimum: 10,
48 | maximum: 20,
49 | },
50 | },
51 | required: ['foo'],
52 | },
53 | }
54 |
55 | // Verify the returned tool object matches expected structure
56 | expect(gotTool).toEqual(expectedInputSchema)
57 | })
58 | })
59 |
--------------------------------------------------------------------------------
/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "target": "esnext",
4 | "lib": ["esnext"],
5 | "module": "esnext",
6 | "moduleResolution": "bundler",
7 | "outDir": "./build",
8 | "rootDir": "./src",
9 | "strict": true,
10 | "esModuleInterop": true,
11 | "skipLibCheck": true,
12 | "forceConsistentCasingInFileNames": true,
13 | "resolveJsonModule": true
14 | },
15 | "include": ["src/**/*"],
16 | "exclude": ["node_modules"]
17 | }
18 |
--------------------------------------------------------------------------------
/tsup.config.ts:
--------------------------------------------------------------------------------
// tsup bundler configuration: bundle the single CLI/server entry point
// as an ESM module with generated .d.ts declarations into ./build
// (matching the "outDir" used by tsconfig.json).
export default {
  entry: ['src/index.ts'],
  dts: true,
  format: ['esm'],
  outDir: 'build',
}
7 |
--------------------------------------------------------------------------------
/vitest.config.ts:
--------------------------------------------------------------------------------
import { defineConfig } from 'vitest/config'

// Vitest configuration for the test suite under ./tests.
export default defineConfig({
  test: {
    // Expose describe/it/expect globally so test files need no imports.
    globals: true,
    environment: 'node',
    // Runs before each test file (e.g. env/mock setup).
    setupFiles: ['./tests/setup.ts'],
    include: ['./tests/**/*.test.ts'],
    coverage: {
      // V8-based coverage restricted to production sources.
      provider: 'v8',
      reporter: ['text', 'json', 'html'],
      include: ['src/**/*.ts'],
      exclude: ['node_modules/', 'tests/'],
    },
  },
})
17 |
--------------------------------------------------------------------------------