├── .gitignore ├── assets └── metrics-monkey.jpeg ├── jest.config.js ├── tsconfig.json ├── src ├── types │ └── index.ts ├── utils │ ├── duration.ts │ └── output.ts ├── cli │ └── index.ts └── core │ └── benchmark.ts ├── LICENSE ├── package.json ├── tests ├── output.test.ts └── benchmark.test.ts └── README.md /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules/ 2 | dist/ 3 | coverage/ 4 | package-lock.json -------------------------------------------------------------------------------- /assets/metrics-monkey.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AmalChandru/metrics-monkey/HEAD/assets/metrics-monkey.jpeg -------------------------------------------------------------------------------- /jest.config.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | preset: 'ts-jest', 3 | testEnvironment: 'node', 4 | transform: { 5 | '^.+\\.tsx?$': 'ts-jest', 6 | }, 7 | testMatch: ['**/tests/**/*.test.ts'], 8 | collectCoverage: true, 9 | }; 10 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "ES6", 4 | "module": "commonjs", 5 | "strict": true, 6 | "esModuleInterop": true, 7 | "skipLibCheck": true, 8 | "outDir": "./dist" 9 | }, 10 | "include": ["src/**/*"] 11 | } 12 | -------------------------------------------------------------------------------- /src/types/index.ts: -------------------------------------------------------------------------------- 1 | export interface BenchmarkOptions { 2 | url: string; 3 | method: 'GET' | 'POST' | 'PUT' | 'DELETE'; 4 | headers?: Record; 5 | body?: any; 6 | requests: number; 7 | concurrency: number; 8 | timeout: number; 9 | duration?: number; 10 | output?: 'plain' | 'json'; 11 | } 12 
| -------------------------------------------------------------------------------- /src/utils/duration.ts: -------------------------------------------------------------------------------- 1 | export function formatDuration(milliseconds: number): string { 2 | const seconds = Math.floor(milliseconds / 1000); 3 | const minutes = Math.floor(seconds / 60); 4 | const hours = Math.floor(minutes / 60); 5 | const days = Math.floor(hours / 24); 6 | 7 | const formatted = [ 8 | days > 0 ? `${days}d` : null, 9 | hours % 24 > 0 ? `${hours % 24}h` : null, 10 | minutes % 60 > 0 ? `${minutes % 60}m` : null, 11 | seconds % 60 > 0 ? `${seconds % 60}s` : null, 12 | ].filter(Boolean).join(' '); 13 | 14 | return formatted || '0s'; 15 | } 16 | -------------------------------------------------------------------------------- /src/utils/output.ts: -------------------------------------------------------------------------------- 1 | // Renders benchmark statistics as a plain-text report block. 2 | export function formatStatistics(stats: { 3 | requestsPerSecond: number; 4 | avgLatency: number; 5 | stdevLatency: number; 6 | maxLatency: number; 7 | statusCodes: Record; 8 | throughput: number; 9 | }): string { 10 | let output = '\n'; 11 | output += 'Benchmark Results\n'; 12 | output += '------------------------------\n'; 13 | output += ` ${'Requests per Second:'.padEnd(20)} ${stats.requestsPerSecond.toFixed(2)}\n`; 14 | output += ` ${'Latency (ms):'.padEnd(20)}\n`; 15 | output += ` ${'Avg:'.padEnd(10)} ${stats.avgLatency.toFixed(2)}ms\n`; 16 | output += ` ${'Stdev:'.padEnd(10)} ${stats.stdevLatency.toFixed(2)}ms\n`; 17 | output += ` ${'Max:'.padEnd(10)} ${stats.maxLatency.toFixed(2)}ms\n`; 18 | output += ` ${'Throughput (MB/s):'.padEnd(20)} ${stats.throughput.toFixed(2)}\n`; 19 | output += ` ${'HTTP Codes:'.padEnd(20)}\n`; 20 | 21 | // An empty map would otherwise print nothing under "HTTP Codes:"; the
22 | // output.test.ts suite expects an explicit "No HTTP Codes" placeholder. 23 | if (Object.keys(stats.statusCodes).length === 0) { 24 | output += `    No HTTP Codes\n`; 25 | } 26 | 27 | Object.keys(stats.statusCodes).forEach(code => { 28 | output += ` ${code}xx: ${stats.statusCodes[parseInt(code, 10)]}\n`; 29 | }); 30 | 31 | output += '------------------------------\n'; 32 | 33 | return output; 34 | } 35 
-------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Amal Chandran M V 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "metrics-monkey", 3 | "version": "1.0.1", 4 | "description": "The metrics-monkey 🐒🚀 is a high-performance CLI tool for easy and effective HTTP benchmarking", 5 | "main": "dist/cli/index.js", 6 | "bin": { 7 | "metrics-monkey": "./dist/cli/index.js" 8 | }, 9 | "scripts": { 10 | "build": "tsc", 11 | "test": "jest", 12 | "prepublishOnly": "npm run build" 13 | }, 14 | "keywords": ["cli", "benchmarking", "load-testing", "performance-testing", "http", "typescript"], 15 | "author": "Amal Chandru", 16 | "license": "MIT", 17 | "repository": { 18 | "type": "git", 19 | "url": "https://github.com/AmalChandru/metrics-monkey.git" 20 | }, 21 | "bugs": { 22 | "url": "https://github.com/AmalChandru/metrics-monkey/issues" 23 | }, 24 | "dependencies": { 25 | "@types/cli-progress": "^3.11.6", 26 | "@types/node": "^22.0.2", 27 | "axios": "^1.7.3", 28 | "cli-progress": "^3.12.0", 29 | "ts-node": "^10.9.2", 30 | "typescript": "^5.5.4", 31 | "yargs": "^17.7.2" 32 | }, 33 | "devDependencies": { 34 | "@types/jest": "^29.5.12", 35 | "@types/yargs": "^17.0.32", 36 | "jest": "^29.7.0", 37 | "ts-jest": "^29.2.4" 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /src/cli/index.ts: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | import { Benchmark } from '../core/benchmark'; 4 | import { BenchmarkOptions } from '../types'; 5 | import * as yargs from 'yargs'; 6 | 7 | // Parse command-line arguments using yargs 8 | const argv = yargs 9 | .usage('Usage: metrics-monkey [options]') 10 | .option('url', { type: 'string', demandOption: true, describe: 'The URL to benchmark' }) 11 | .option('method', { type: 'string', default: 'GET', describe: 'HTTP method' }) 12 | .option('headers', { type: 
'string', default: '{}', describe: 'HTTP headers in JSON format' }) 13 | .option('body', { type: 'string', default: '', describe: 'Request body' }) 14 | .option('requests', { type: 'number', demandOption: true, describe: 'Number of requests to send' }) 15 | .option('concurrency', { type: 'number', demandOption: true, describe: 'Number of concurrent connections' }) 16 | .option('timeout', { type: 'number', default: 10, describe: 'Request timeout in seconds' }) 17 | .option('duration', { type: 'number', default: 0, describe: 'Benchmark duration in milliseconds' }) 18 | .option('output', { type: 'string', default: 'plain', choices: ['plain', 'json'], describe: 'Output format' }) 19 | .help() 20 | .argv as yargs.Arguments; 21 | 22 | const options: BenchmarkOptions = { 23 | url: argv.url as string, 24 | method: (argv.method as string).toUpperCase() as 'GET' | 'POST' | 'PUT' | 'DELETE', 25 | headers: JSON.parse(argv.headers as string), 26 | body: argv.body as string, 27 | requests: argv.requests as number, 28 | concurrency: argv.concurrency as number, 29 | timeout: argv.timeout as number, 30 | duration: argv.duration as number, 31 | output: argv.output as 'plain' | 'json', 32 | }; 33 | 34 | // Create an instance of Benchmark and run it 35 | const benchmark = new Benchmark(options); 36 | benchmark.run(); 37 | -------------------------------------------------------------------------------- /tests/output.test.ts: -------------------------------------------------------------------------------- 1 | import { formatStatistics } from '../src/utils/output'; 2 | 3 | describe('formatStatistics', () => { 4 | test('should format statistics correctly for plain text', () => { 5 | const stats = { 6 | requestsPerSecond: 1500, 7 | tps: 1500, 8 | avgLatency: 20, 9 | stdevLatency: 5, 10 | maxLatency: 50, 11 | statusCodes: { 200: 1000 }, 12 | throughput: 0.5 13 | }; 14 | 15 | const formatted = formatStatistics(stats); 16 | expect(formatted).toContain('Requests per Second: 1500.00'); 17 | 
expect(formatted).toContain('Latency (ms):'); 18 | expect(formatted).toContain('Avg: 20.00ms'); 19 | expect(formatted).toContain('Stdev: 5.00ms'); 20 | expect(formatted).toContain('Max: 50.00ms'); 21 | expect(formatted).toContain('Throughput (MB/s): 0.50'); 22 | expect(formatted).toContain('HTTP Codes:'); 23 | expect(formatted).toContain('200xx: 1000'); 24 | }); 25 | 26 | test('should handle zero values correctly', () => { 27 | const stats = { 28 | requestsPerSecond: 0, 29 | tps: 0, 30 | avgLatency: 0, 31 | stdevLatency: 0, 32 | maxLatency: 0, 33 | statusCodes: { 200: 0 }, 34 | throughput: 0 35 | }; 36 | 37 | const formatted = formatStatistics(stats); 38 | expect(formatted).toContain('Requests per Second: 0.00'); 39 | expect(formatted).toContain('Latency (ms):'); 40 | expect(formatted).toContain('Avg: 0.00ms'); 41 | expect(formatted).toContain('Stdev: 0.00ms'); 42 | expect(formatted).toContain('Max: 0.00ms'); 43 | expect(formatted).toContain('Throughput (MB/s): 0.00'); 44 | expect(formatted).toContain('HTTP Codes:'); 45 | expect(formatted).toContain('200xx: 0'); 46 | }); 47 | 48 | test('should handle missing status codes correctly', () => { 49 | const stats = { 50 | requestsPerSecond: 1500, 51 | tps: 1500, 52 | avgLatency: 20, 53 | stdevLatency: 5, 54 | maxLatency: 50, 55 | statusCodes: {}, 56 | throughput: 0.5 57 | }; 58 | 59 | const formatted = formatStatistics(stats); 60 | expect(formatted).toContain('Requests per Second: 1500.00'); 61 | expect(formatted).toContain('Latency (ms):'); 62 | expect(formatted).toContain('Avg: 20.00ms'); 63 | expect(formatted).toContain('Stdev: 5.00ms'); 64 | expect(formatted).toContain('Max: 50.00ms'); 65 | expect(formatted).toContain('Throughput (MB/s): 0.50'); 66 | expect(formatted).toContain('HTTP Codes:'); 67 | expect(formatted).toContain('No HTTP Codes'); 68 | }); 69 | 70 | test('should handle multiple status codes correctly', () => { 71 | const stats = { 72 | requestsPerSecond: 1500, 73 | tps: 1500, 74 | avgLatency: 20, 75 | 
stdevLatency: 5, 76 | maxLatency: 50, 77 | statusCodes: { 200: 1000, 404: 50, 500: 10 }, 78 | throughput: 0.5 79 | }; 80 | 81 | const formatted = formatStatistics(stats); 82 | expect(formatted).toContain('Requests per Second: 1500.00'); 83 | expect(formatted).toContain('Latency (ms):'); 84 | expect(formatted).toContain('Avg: 20.00ms'); 85 | expect(formatted).toContain('Stdev: 5.00ms'); 86 | expect(formatted).toContain('Max: 50.00ms'); 87 | expect(formatted).toContain('Throughput (MB/s): 0.50'); 88 | expect(formatted).toContain('HTTP Codes:'); 89 | expect(formatted).toContain('200xx: 1000'); 90 | expect(formatted).toContain('404xx: 50'); 91 | expect(formatted).toContain('500xx: 10'); 92 | }); 93 | }); -------------------------------------------------------------------------------- /src/core/benchmark.ts: -------------------------------------------------------------------------------- 1 | import axios from 'axios'; 2 | import { performance } from 'perf_hooks'; 3 | import { SingleBar, Presets } from 'cli-progress'; 4 | import { BenchmarkOptions } from '../types'; 5 | import { formatStatistics } from '../utils/output'; 6 | import { formatDuration } from '../utils/duration'; 7 | 8 | export class Benchmark { 9 | private options: BenchmarkOptions; 10 | private progressBar: SingleBar; 11 | private totalRequests: number; 12 | private responseTimes: number[] = []; 13 | private statusCodes: Record = {}; 14 | private startTime: number; 15 | 16 | constructor(options: BenchmarkOptions) { 17 | this.options = options; 18 | this.progressBar = new SingleBar({}, Presets.legacy); 19 | this.totalRequests = options.requests; 20 | this.startTime = performance.now(); 21 | } 22 | 23 | async run() { 24 | const { url, method, headers, body, requests, concurrency, duration } = this.options; 25 | 26 | this.progressBar.start(requests, 0); 27 | 28 | const promises = []; 29 | for (let i = 0; i < concurrency; i++) { 30 | promises.push(this.makeRequests(Math.ceil(requests / concurrency))); 31 | } 
32 | 33 | if (duration) { 34 | setTimeout(() => { 35 | this.progressBar.stop(); 36 | this.printStatistics(); 37 | process.exit(0); 38 | }, duration); 39 | } 40 | 41 | const results = await Promise.all(promises); 42 | 43 | this.progressBar.stop(); 44 | this.printStatistics(); 45 | } 46 | 47 | private async makeRequests(count: number) { 48 | const results = []; 49 | for (let i = 0; i < count; i++) { 50 | const start = performance.now(); 51 | try { 52 | const response = await axios({ 53 | method: this.options.method, 54 | url: this.options.url, 55 | headers: this.options.headers, 56 | data: this.options.body, 57 | timeout: this.options.timeout * 1000, 58 | }); 59 | const duration = performance.now() - start; 60 | this.recordResponse(response.status, duration); 61 | results.push(duration); 62 | } catch (error: unknown) { 63 | if (axios.isAxiosError(error)) { 64 | const duration = performance.now() - start; 65 | this.recordResponse(error.response ? error.response.status : 0, duration); 66 | results.push(duration); 67 | } else { 68 | console.error('Unexpected error:', error); 69 | throw error; 70 | } 71 | } 72 | this.progressBar.increment(); 73 | } 74 | return results; 75 | } 76 | 77 | private recordResponse(statusCode: number, duration: number) { 78 | this.responseTimes.push(duration); 79 | this.statusCodes[statusCode] = (this.statusCodes[statusCode] || 0) + 1; 80 | } 81 | 82 | private printStatistics() { 83 | const endTime = performance.now(); 84 | const totalTime = (endTime - this.startTime) / 1000; // in seconds 85 | const requestsPerSecond = this.totalRequests / totalTime; 86 | const latencyStats = this.calculateLatencyStats(); 87 | const throughput = this.calculateThroughput(); 88 | 89 | const stats = { 90 | requestsPerSecond, 91 | avgLatency: latencyStats.avg, 92 | stdevLatency: latencyStats.stdev, 93 | maxLatency: latencyStats.max, 94 | statusCodes: this.statusCodes, 95 | throughput 96 | }; 97 | 98 | console.log(formatStatistics(stats)); 99 | console.log(`Total 
time: ${formatDuration(totalTime * 1000)}`); 100 | } 101 | 102 | // Average, standard deviation and max of the recorded per-request latencies. 103 | private calculateLatencyStats() { 104 | const avg = this.responseTimes.reduce((sum, time) => sum + time, 0) / this.responseTimes.length; 105 | const stdev = Math.sqrt(this.responseTimes.reduce((sum, time) => sum + Math.pow(time - avg, 2), 0) / this.responseTimes.length); 106 | const max = Math.max(...this.responseTimes); 107 | return { avg, stdev, max }; // responseTimes are performance.now() deltas, already in ms — no /1000 conversion 108 | } 109 | 110 | private calculateThroughput() { 111 | // Assuming body is the size of each request in bytes if not empty 112 | const requestSize = this.options.body ? Buffer.byteLength(this.options.body) : 0; 113 | const totalSize = this.totalRequests * requestSize; // in bytes 114 | const totalTime = (performance.now() - this.startTime) / 1000; // in seconds 115 | const throughputMB = (totalSize / totalTime) / (1024 * 1024); // MB/s 116 | return throughputMB; 117 | } 118 | } 119 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # metrics-monkey 🐒🚀 2 | 3 |

4 | metrics-monkey logo 5 |
6 | M. Monkey generated by Llama 3.1 7 |

8 | 9 | The `metrics-monkey` is a no-nonsense, high-performance CLI HTTP benchmarking tool that's a swiss army knife for developers and performance engineers who want to: 10 | 11 | - 📈 **Supercharge Web Performance:** Benchmark your web services to spot performance bottlenecks and optimize your application for top-notch speed and reliability. 12 | - 🔄 **Test API Endpoints:** Simulate various traffic patterns, including high concurrency, large number of requests, and prolonged durations, to ensure your API endpoints can handle the load. 13 | - 🔍 **Assess API Dependencies:** Verify if the APIs you depend on can withstand the load and determine their maximum capacity. 14 | 15 | ## Installation 16 | 17 | You can install metrics-monkey via npm: 18 | ```bash 19 | npm install -g metrics-monkey 20 | ``` 21 | 22 | ## Usage 23 | 24 | ```bash 25 | metrics-monkey --url --method --requests --concurrency --timeout --duration --headers --body --output 26 | ``` 27 | 28 | **Options:** 29 | - `--help:` Display the help message with all available options and their descriptions. 30 | - `--url:` The URL to benchmark. 31 | - `--method:` The HTTP method to use (GET, POST, PUT, DELETE). 32 | - `--requests:` The total number of requests to make. 33 | - `--concurrency:` The number of concurrent requests. 34 | - `--timeout:` The timeout for each request in seconds. 35 | - `--duration:` The duration of the test in milliseconds. 36 | - `--headers:` The headers to include with each request (in JSON format). 37 | - `--body:` The body content to include with POST or PUT requests. 38 | - `--output:` The output format (plain or json). 39 | 40 | ## Examples 41 | ### 1: Basic GET Request Benchmark 42 | ```bash 43 | metrics-monkey --url https://jsonplaceholder.typicode.com/posts --method GET --requests 1000 --concurrency 50 --timeout 10 --duration 60000 44 | ``` 45 | 46 | **Description:** Benchmarks a GET request to https://jsonplaceholder.typicode.com/posts. 
The test will issue 1000 requests with a concurrency of 50, a timeout of 10 seconds per request, and will run for at most 60 seconds (the 60000 ms duration cap). 47 | 48 | **Example Output:** 49 | ```bash 50 | progress [======================================--] 94% | ETA: 1s | 943/1000 51 | 52 | Benchmark Results 53 | ------------------------------ 54 | Requests per Second: 163.67 55 | Latency (ms): 56 | Avg: 250.38ms 57 | Stdev: 341.99ms 58 | Max: 2.26ms 59 | Throughput (MB/s): 0.00 60 | HTTP Codes: 61 | 200xx: 943 62 | ------------------------------ 63 | 64 | Total time: 6s 65 | ``` 66 | 67 | ### 2: POST Request with Custom Headers 68 | ```bash 69 | metrics-monkey --url https://api.example.com/data --method POST --requests 500 --concurrency 20 --timeout 5 --headers '{"Authorization": "Bearer my-token"}' --body '{"key": "value"}' 70 | ``` 71 | **Description:** Sends 500 POST requests to https://api.example.com/data, including a custom Authorization header and a JSON body. The tool will use a concurrency of 20 and a timeout of 5 seconds per request. 72 | 73 | **Example Output:** 74 | ```bash 75 | progress [========================================] 100% | ETA: 0s | 500/500 76 | 77 | Benchmark Results 78 | ------------------------------ 79 | Requests per Second: 75.41 80 | Latency (ms): 81 | Avg: 260.07ms 82 | Stdev: 304.49ms 83 | Max: 1.99ms 84 | Throughput (MB/s): 0.00 85 | HTTP Codes: 86 | 0xx: 500 87 | ------------------------------ 88 | 89 | Total time: 6s 90 | ``` 91 | ### 3: PUT Request with Extended Duration 92 | ```bash 93 | metrics-monkey --url https://api.example.com/update --method PUT --requests 2000 --concurrency 100 --timeout 15 --duration 120000 94 | ``` 95 | **Description:** Performs a PUT request to https://api.example.com/update with 2000 requests and a concurrency of 100. The test will run for at most 2 minutes (120,000 milliseconds) with each request timing out after 15 seconds. The results will be displayed in plain text format. 
96 | 97 | **Example Output:** 98 | ```bash 99 | progress [========================================] 100% | ETA: 0s | 2000/2000 100 | 101 | Benchmark Results 102 | ------------------------------ 103 | Requests per Second: 38.01 104 | Latency (ms): 105 | Avg: 2595.13ms 106 | Stdev: 3039.69ms 107 | Max: 12.60ms 108 | Throughput (MB/s): 0.00 109 | HTTP Codes: 110 | 0xx: 2000 111 | ------------------------------ 112 | 113 | Total time: 52s 114 | ``` 115 | -------------------------------------------------------------------------------- /tests/benchmark.test.ts: -------------------------------------------------------------------------------- 1 | import axios from 'axios'; 2 | import { Benchmark } from '../src/core/benchmark'; 3 | import { BenchmarkOptions } from '../src/types'; 4 | import { formatStatistics } from '../src/utils/output'; 5 | 6 | jest.mock('axios'); 7 | const mockedAxios = axios as jest.MockedFunction; 8 | 9 | describe('Benchmark', () => { 10 | let benchmark: Benchmark; 11 | 12 | beforeEach(() => { 13 | const options: BenchmarkOptions = { 14 | url: 'https://jsonplaceholder.typicode.com/posts', 15 | method: 'GET', 16 | headers: {}, 17 | body: '', 18 | requests: 100, 19 | concurrency: 10, 20 | timeout: 5, 21 | duration: 60000, 22 | output: 'plain' 23 | }; 24 | benchmark = new Benchmark(options); 25 | }); 26 | 27 | test('should initialize with correct options', () => { 28 | expect(benchmark).toBeDefined(); 29 | expect(benchmark['options'].url).toBe('https://jsonplaceholder.typicode.com/posts'); 30 | }); 31 | 32 | test('should handle successful requests', async () => { 33 | mockedAxios.mockResolvedValueOnce({ status: 200, data: {} }); 34 | 35 | await benchmark.run(); 36 | 37 | expect(mockedAxios).toHaveBeenCalled(); 38 | expect(mockedAxios).toHaveBeenCalledWith(expect.objectContaining({ 39 | method: 'GET', 40 | url: 'https://jsonplaceholder.typicode.com/posts', 41 | })); 42 | }); 43 | 44 | test('should handle failed requests', async () => { 45 | 
mockedAxios.mockRejectedValueOnce({ response: { status: 500 } }); 46 | 47 | await benchmark.run(); 48 | 49 | expect(mockedAxios).toHaveBeenCalled(); 50 | expect(mockedAxios).toHaveBeenCalledWith(expect.objectContaining({ 51 | method: 'GET', 52 | url: 'https://jsonplaceholder.typicode.com/posts', 53 | })); 54 | }); 55 | 56 | test('should handle timeout correctly', async () => { 57 | mockedAxios.mockImplementationOnce(() => new Promise((_, reject) => setTimeout(() => reject({ response: { status: 408 } }), 6000))); 58 | 59 | await benchmark.run(); 60 | 61 | expect(mockedAxios).toHaveBeenCalled(); 62 | expect(mockedAxios).toHaveBeenCalledWith(expect.objectContaining({ 63 | method: 'GET', 64 | url: 'https://jsonplaceholder.typicode.com/posts', 65 | })); 66 | }); 67 | 68 | test('should handle different HTTP methods', async () => { 69 | const options: BenchmarkOptions = { 70 | url: 'https://jsonplaceholder.typicode.com/posts', 71 | method: 'POST', 72 | headers: { 'Content-Type': 'application/json' }, 73 | body: JSON.stringify({ title: 'foo', body: 'bar', userId: 1 }), 74 | requests: 100, 75 | concurrency: 10, 76 | timeout: 5, 77 | duration: 60000, 78 | output: 'plain' 79 | }; 80 | benchmark = new Benchmark(options); 81 | 82 | mockedAxios.mockResolvedValueOnce({ status: 201, data: {} }); 83 | 84 | await benchmark.run(); 85 | 86 | expect(mockedAxios).toHaveBeenCalled(); 87 | expect(mockedAxios).toHaveBeenCalledWith(expect.objectContaining({ 88 | method: 'POST', 89 | url: 'https://jsonplaceholder.typicode.com/posts', 90 | data: JSON.stringify({ title: 'foo', body: 'bar', userId: 1 }), 91 | })); 92 | }); 93 | 94 | test('should handle invalid URL', async () => { 95 | const options: BenchmarkOptions = { 96 | url: 'invalid-url', 97 | method: 'GET', 98 | headers: {}, 99 | body: '', 100 | requests: 100, 101 | concurrency: 10, 102 | timeout: 5, 103 | duration: 60000, 104 | output: 'plain' 105 | }; 106 | benchmark = new Benchmark(options); 107 | 108 | await 
expect(benchmark.run()).rejects.toThrow('Invalid URL'); 109 | }); 110 | 111 | test('should calculate statistics correctly', () => { 112 | // Simulate some response times 113 | benchmark['responseTimes'] = [20, 25, 30, 15, 35]; 114 | benchmark['statusCodes'] = { 200: 100, 500: 5 }; 115 | 116 | const stats = benchmark['calculateLatencyStats'](); 117 | expect(stats.avg).toBeCloseTo(25, 1); 118 | expect(stats.stdev).toBeCloseTo(7.5, 1); 119 | expect(stats.max).toBe(35); 120 | 121 | const throughput = benchmark['calculateThroughput'](); 122 | expect(throughput).toBeGreaterThan(0); 123 | }); 124 | 125 | test('should format statistics correctly', () => { 126 | const stats = { 127 | requestsPerSecond: 1500, 128 | tps: 1500, 129 | avgLatency: 20, 130 | stdevLatency: 5, 131 | maxLatency: 50, 132 | statusCodes: { 200: 1000 }, 133 | throughput: 0.5 134 | }; 135 | 136 | const formatted = formatStatistics(stats); 137 | expect(formatted).toContain('Requests per Second: 1500.00'); 138 | expect(formatted).toContain('TPS: 1500.00'); 139 | expect(formatted).toContain('Latency (ms):'); 140 | expect(formatted).toContain('Avg: 20.00ms'); 141 | expect(formatted).toContain('Stdev: 5.00ms'); 142 | expect(formatted).toContain('Max: 50.00ms'); 143 | expect(formatted).toContain('Throughput (MB/s): 0.50'); 144 | expect(formatted).toContain('HTTP Codes:'); 145 | expect(formatted).toContain('200xx: 1000'); 146 | }); 147 | 148 | test('should output results to file when --output flag is used', async () => { 149 | const fs = require('fs'); 150 | const mockWriteFile = jest.spyOn(fs, 'writeFileSync').mockImplementation(() => {}); 151 | 152 | benchmark['options'].output = 'json'; // Or 'plain' 153 | 154 | await benchmark.run(); 155 | 156 | expect(mockWriteFile).toHaveBeenCalled(); 157 | mockWriteFile.mockRestore(); 158 | }); 159 | }); --------------------------------------------------------------------------------