├── .gitignore
├── integrations
│   ├── __init__.py
│   ├── __pycache__
│   │   ├── sentry.cpython-311.pyc
│   │   ├── tinydb.cpython-311.pyc
│   │   └── __init__.cpython-311.pyc
│   ├── sentry.py
│   └── tinydb.py
├── litellm_uuid.txt
├── requirements.txt
├── .DS_Store
├── __pycache__
│   └── utils.cpython-311.pyc
├── admin-dashboard
│   ├── src
│   │   └── app
│   │       ├── favicon.ico
│   │       ├── layout.js
│   │       ├── globals.css
│   │       ├── components
│   │       │   ├── Navbar.js
│   │       │   ├── Table.js
│   │       │   └── Logs.js
│   │       ├── page.js
│   │       └── api
│   │           └── route.js
│   ├── jsconfig.json
│   ├── next.config.js
│   ├── postcss.config.js
│   ├── .gitignore
│   ├── package.json
│   ├── public
│   │   ├── vercel.svg
│   │   └── next.svg
│   ├── README.md
│   ├── tailwind.config.js
│   └── package-lock.json
├── Dockerfile
├── test_proxy_stream.py
├── LICENSE
├── models_info.json
├── main.py
├── readme.md
├── utils.py
└── request_logs.json
/.gitignore:
--------------------------------------------------------------------------------
1 | .env
--------------------------------------------------------------------------------
/integrations/__init__.py:
--------------------------------------------------------------------------------
1 | from . import *
--------------------------------------------------------------------------------
/litellm_uuid.txt:
--------------------------------------------------------------------------------
1 | 6526fd1e-0fc1-42da-a520-ce96270cc85d
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | flask
2 | flask_cors
3 | waitress
4 | litellm==0.1.381
--------------------------------------------------------------------------------
/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Div-Infinity/liteLLM-proxy/HEAD/.DS_Store
--------------------------------------------------------------------------------
/__pycache__/utils.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Div-Infinity/liteLLM-proxy/HEAD/__pycache__/utils.cpython-311.pyc
--------------------------------------------------------------------------------
/admin-dashboard/src/app/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Div-Infinity/liteLLM-proxy/HEAD/admin-dashboard/src/app/favicon.ico
--------------------------------------------------------------------------------
/admin-dashboard/jsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "paths": {
4 | "@/*": ["./src/*"]
5 | }
6 | }
7 | }
8 |
--------------------------------------------------------------------------------
/admin-dashboard/next.config.js:
--------------------------------------------------------------------------------
1 | /** @type {import('next').NextConfig} */
2 | const nextConfig = {}
3 |
4 | module.exports = nextConfig
5 |
--------------------------------------------------------------------------------
/admin-dashboard/postcss.config.js:
--------------------------------------------------------------------------------
1 | module.exports = {
2 | plugins: {
3 | tailwindcss: {},
4 | autoprefixer: {},
5 | },
6 | }
7 |
--------------------------------------------------------------------------------
/integrations/__pycache__/sentry.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Div-Infinity/liteLLM-proxy/HEAD/integrations/__pycache__/sentry.cpython-311.pyc
--------------------------------------------------------------------------------
/integrations/__pycache__/tinydb.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Div-Infinity/liteLLM-proxy/HEAD/integrations/__pycache__/tinydb.cpython-311.pyc
--------------------------------------------------------------------------------
/integrations/__pycache__/__init__.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Div-Infinity/liteLLM-proxy/HEAD/integrations/__pycache__/__init__.cpython-311.pyc
--------------------------------------------------------------------------------
/admin-dashboard/src/app/layout.js:
--------------------------------------------------------------------------------
1 | import './globals.css'
2 | import { Inter } from 'next/font/google'
3 |
4 | const inter = Inter({ subsets: ['latin'] })
5 |
6 | export const metadata = {
7 | title: 'Create Next App',
8 | description: 'Generated by create next app',
9 | }
10 |
11 | export default function RootLayout({ children }) {
12 | return (
13 | <html lang="en">
14 | <body className={inter.className}>{children}</body>
15 | </html>
16 | )
17 | }
18 |
--------------------------------------------------------------------------------
/admin-dashboard/.gitignore:
--------------------------------------------------------------------------------
1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
2 |
3 | # dependencies
4 | /node_modules
5 | /.pnp
6 | .pnp.js
7 |
8 | # testing
9 | /coverage
10 |
11 | # next.js
12 | /.next/
13 | /out/
14 |
15 | # production
16 | /build
17 |
18 | # misc
19 | .DS_Store
20 | *.pem
21 |
22 | # debug
23 | npm-debug.log*
24 | yarn-debug.log*
25 | yarn-error.log*
26 |
27 | # local env files
28 | .env*.local
29 |
30 | # vercel
31 | .vercel
32 |
33 | # typescript
34 | *.tsbuildinfo
35 | next-env.d.ts
36 |
--------------------------------------------------------------------------------
/admin-dashboard/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "admin-dashboard",
3 | "version": "0.1.0",
4 | "private": true,
5 | "scripts": {
6 | "dev": "next dev",
7 | "build": "next build",
8 | "start": "next start",
9 | "lint": "next lint"
10 | },
11 | "dependencies": {
12 | "@heroicons/react": "^1.0.6",
13 | "@tremor/react": "^3.6.1",
14 | "autoprefixer": "10.4.15",
15 | "next": "13.4.17",
16 | "postcss": "8.4.28",
17 | "react": "18.2.0",
18 | "react-dom": "18.2.0",
19 | "tailwindcss": "3.3.3"
20 | }
21 | }
22 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | # Use Python 3.8 slim as the base image
2 | FROM python:3.8-slim-buster
3 |
4 | # Set the working directory to /app
5 | WORKDIR /app
6 |
7 | # Copy the requirements.txt file to the image
8 | COPY requirements.txt .
9 |
10 |
11 |
12 | # Install the required Python packages using pip
13 | RUN pip install --no-cache-dir -r requirements.txt
14 |
15 | # Copy the rest of the application files to the image
16 | COPY . .
17 |
18 | # Expose port 4000, which the waitress server in main.py listens on
19 | EXPOSE 4000
20 |
21 | # Run the main.py file when the container is started
22 | CMD ["python", "main.py"]
--------------------------------------------------------------------------------
/admin-dashboard/public/vercel.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/admin-dashboard/src/app/globals.css:
--------------------------------------------------------------------------------
1 | @tailwind base;
2 | @tailwind components;
3 | @tailwind utilities;
4 |
5 | :root {
6 | --foreground-rgb: 0, 0, 0;
7 | --background-start-rgb: 214, 219, 220;
8 | --background-end-rgb: 255, 255, 255;
9 | }
10 |
11 | /* @media (prefers-color-scheme: dark) {
12 | :root {
13 | --foreground-rgb: 255, 255, 255;
14 | --background-start-rgb: 0, 0, 0;
15 | --background-end-rgb: 0, 0, 0;
16 | }
17 | } */
18 |
19 | body {
20 | color: rgb(var(--foreground-rgb));
21 | background: linear-gradient(
22 | to bottom,
23 | transparent,
24 | rgb(var(--background-end-rgb))
25 | )
26 | rgb(var(--background-start-rgb));
27 | }
28 |
--------------------------------------------------------------------------------
/test_proxy_stream.py:
--------------------------------------------------------------------------------
1 | import openai
2 | import os
3 |
4 | os.environ["OPENAI_API_KEY"] = "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"
5 |
6 | openai.api_key = os.environ["OPENAI_API_KEY"]
7 | openai.api_base = "http://localhost:4000"
8 |
9 | messages = [
10 | {
11 | "role": "user",
12 | "content": "write a 1 pg essay in liteLLM"
13 | }
14 | ]
15 |
16 | # response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=messages, stream=True)
17 | response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=messages, user="krrish@berri.ai")
18 | # response = openai.ChatCompletion.create(model="command-nightly", messages=messages, user="ishaan@berri.ai")
19 | # response = openai.ChatCompletion.create(model="claude-instant-1", messages=messages, user="peter@berri.ai")
20 | print("got response", response)
21 | # response is a generator
22 |
23 | # for chunk in response:
24 | # print(chunk)
--------------------------------------------------------------------------------
/integrations/sentry.py:
--------------------------------------------------------------------------------
1 | #### What this does ####
2 | # If an error occurs, capture it and its breadcrumbs with Sentry
3 |
4 | import dotenv, os
5 | import requests
6 | dotenv.load_dotenv() # Loading env variables using dotenv
7 | import traceback
8 | import datetime, subprocess, sys
9 |
10 | class Sentry:
11 | def __init__(self):
12 | # Instance variables
13 | try:
14 | import sentry_sdk
15 | except ImportError:
16 | subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'sentry_sdk'])
17 | import sentry_sdk
18 | sentry_sdk_instance = sentry_sdk
19 | self.sentry_trace_rate = os.environ.get("SENTRY_API_TRACE_RATE", "1.0")
20 | sentry_sdk_instance.init(dsn=os.environ.get("SENTRY_API_URL"), traces_sample_rate=float(self.sentry_trace_rate))
21 | self.capture_exception = sentry_sdk_instance.capture_exception
22 | self.add_breadcrumb = sentry_sdk_instance.add_breadcrumb
23 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 Berri AI
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/admin-dashboard/public/next.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/admin-dashboard/README.md:
--------------------------------------------------------------------------------
1 | This is a [Next.js](https://nextjs.org/) project bootstrapped with [`create-next-app`](https://github.com/vercel/next.js/tree/canary/packages/create-next-app).
2 |
3 | ## Getting Started
4 |
5 | First, run the development server:
6 |
7 | ```bash
8 | npm run dev
9 | # or
10 | yarn dev
11 | # or
12 | pnpm dev
13 | ```
14 |
15 | Open [http://localhost:3000](http://localhost:3000) with your browser to see the result.
16 |
17 | You can start editing the page by modifying `app/page.js`. The page auto-updates as you edit the file.
18 |
19 | This project uses [`next/font`](https://nextjs.org/docs/basic-features/font-optimization) to automatically optimize and load Inter, a custom Google Font.
20 |
21 | ## Learn More
22 |
23 | To learn more about Next.js, take a look at the following resources:
24 |
25 | - [Next.js Documentation](https://nextjs.org/docs) - learn about Next.js features and API.
26 | - [Learn Next.js](https://nextjs.org/learn) - an interactive Next.js tutorial.
27 |
28 | You can check out [the Next.js GitHub repository](https://github.com/vercel/next.js/) - your feedback and contributions are welcome!
29 |
30 | ## Deploy on Vercel
31 |
32 | The easiest way to deploy your Next.js app is to use the [Vercel Platform](https://vercel.com/new?utm_medium=default-template&filter=next.js&utm_source=create-next-app&utm_campaign=create-next-app-readme) from the creators of Next.js.
33 |
34 | Check out our [Next.js deployment documentation](https://nextjs.org/docs/deployment) for more details.
35 |
--------------------------------------------------------------------------------
/models_info.json:
--------------------------------------------------------------------------------
1 |
2 | {
3 | "gpt-3.5-turbo": {"max_tokens": 4000, "input_cost_per_token": 0.0000015, "output_cost_per_token": 0.000002},
4 | "gpt-35-turbo": {"max_tokens": 4000, "input_cost_per_token": 0.0000015, "output_cost_per_token": 0.000002},
5 | "gpt-3.5-turbo-0613": {"max_tokens": 4000, "input_cost_per_token": 0.0000015, "output_cost_per_token": 0.000002},
6 | "gpt-3.5-turbo-0301": {"max_tokens": 4000, "input_cost_per_token": 0.0000015, "output_cost_per_token": 0.000002},
7 | "gpt-3.5-turbo-16k": {"max_tokens": 16000, "input_cost_per_token": 0.000003, "output_cost_per_token": 0.000004},
8 | "gpt-35-turbo-16k": {"max_tokens": 16000, "input_cost_per_token": 0.000003, "output_cost_per_token": 0.000004},
9 | "gpt-3.5-turbo-16k-0613": {"max_tokens": 16000, "input_cost_per_token": 0.000003, "output_cost_per_token": 0.000004},
10 | "gpt-4": {"max_tokens": 8000, "input_cost_per_token": 0.000003, "output_cost_per_token": 0.00006},
11 | "gpt-4-0613": {"max_tokens": 8000, "input_cost_per_token": 0.000003, "output_cost_per_token": 0.00006},
12 | "gpt-4-32k": {"max_tokens": 8000, "input_cost_per_token": 0.00006, "output_cost_per_token": 0.00012},
13 | "claude-instant-1": {"max_tokens": 100000, "input_cost_per_token": 0.00000163, "output_cost_per_token": 0.00000551},
14 | "claude-2": {"max_tokens": 100000, "input_cost_per_token": 0.00001102, "output_cost_per_token": 0.00003268},
15 | "text-bison-001": {"max_tokens": 8192, "input_cost_per_token": 0.000004, "output_cost_per_token": 0.000004},
16 | "chat-bison-001": {"max_tokens": 4096, "input_cost_per_token": 0.000002, "output_cost_per_token": 0.000002},
17 | "command-nightly": {"max_tokens": 4096, "input_cost_per_token": 0.000015, "output_cost_per_token": 0.000015}
18 | }
--------------------------------------------------------------------------------
/admin-dashboard/src/app/components/Navbar.js:
--------------------------------------------------------------------------------
1 | 'use client'
2 | import Link from 'next/link';
3 | import Image from 'next/image'
4 | import React, { useState } from 'react';
5 | function Navbar() {
6 | return (
7 |
28 | )
29 | }
30 |
31 | export default Navbar;
--------------------------------------------------------------------------------
/admin-dashboard/src/app/page.js:
--------------------------------------------------------------------------------
1 | "use client"
2 | import React, { useEffect, useState } from 'react';
3 | import Image from 'next/image'
4 | import Navbar from './components/Navbar';
5 | import Table from './components/Table';
6 | import Logs from './components/Logs';
7 | import { BadgeDelta, Card, Col, Grid, DeltaType, Flex, Metric, ProgressBar, Text } from "@tremor/react";
8 | import { Title, LineChart, BarChart} from "@tremor/react";
9 |
10 | const spend_per_project = [
11 | {
12 | "spend": 1000,
13 | "project": "QA App",
14 | },
15 | {
16 | "spend": 500,
17 | "project": "LLM App Playground"
18 | },
19 | {
20 | "spend": 1500,
21 | "project": "Code Gen Tool"
22 | }
23 | ]
24 |
25 | export default function Home() {
26 | const [data, setData] = useState([]);
27 |
28 | useEffect(() => {
29 | // call your API route to fetch the data
30 | fetch('/api/')
31 | .then(res => res.json())
32 | .then(resData => setData(resData))
33 | }, []);
34 |
35 | return (
36 |
37 |
38 |
39 |
40 |
41 | Spend per project
42 | Total Spend
43 | $ 3000
44 |
52 |
53 | {data && }
54 |
55 | {data && }
56 |
57 |
58 |
59 | )
60 | }
--------------------------------------------------------------------------------
/admin-dashboard/src/app/components/Table.js:
--------------------------------------------------------------------------------
1 | import { StatusOnlineIcon } from "@heroicons/react/outline";
2 | import {
3 | Card,
4 | Table,
5 | TableHead,
6 | TableRow,
7 | TableHeaderCell,
8 | TableBody,
9 | TableCell,
10 | Text,
11 | Title,
12 | Badge,
13 | } from "@tremor/react";
14 |
15 | const placeholder_data = [
16 | {
17 | "name": "krrish@berri.ai",
18 | "project": "QA App",
19 | "total_cost": 100,
20 | "status": "rate-limited"
21 | },
22 | {
23 | "name": "ishaan@berri.ai",
24 | "project": "Code Gen Tool",
25 | "total_cost": 75,
26 | "status": "near limit"
27 | },
28 | {
29 | "name": "peter@berri.ai",
30 | "project": "LLM App Playground",
31 | "total_cost": 20,
32 | "status": "normal"
33 | }
34 | ]
35 |
36 | export default (props) => {
37 | console.log("table props: ", props)
38 | return (
39 |
40 | Top Users
41 |
42 |
43 |
44 | ID
45 | Project
46 | Total Cost
47 | Status
48 |
49 |
50 | {props.data && typeof props.data === 'object' &&
51 |
52 | {Object.entries(placeholder_data).map(([key, value]) => (
53 |
54 | {value.name}
55 | {value.project}
56 | {value.total_cost}
57 |
58 | {value.status == "rate-limited" ?
59 |
60 | {value.status}
61 |
62 | : value.status == "near limit" ?
63 |
64 | {value.status}
65 |
66 | : value.status == "normal" ?
67 |
68 | {value.status}
69 |
70 | : null
71 | }
72 |
73 |
74 | ))}
75 |
76 | }
77 |
78 |
79 | )};
--------------------------------------------------------------------------------
/admin-dashboard/src/app/api/route.js:
--------------------------------------------------------------------------------
1 | // app/api/route.js 👈🏽
2 |
3 | import { NextResponse } from 'next/server'
4 | import fs from 'fs';
5 | import path from 'path';
6 |
7 | export async function GET(req) {
8 | // Specify the path to your JSON file
9 | const filepath = path.resolve(process.cwd(), '../', 'request_logs.json');
10 |
11 | try {
12 | // Read the file synchronously
13 | const rawJsonData = fs.readFileSync(filepath, 'utf8');
14 |
15 | // Parse the file to a JavaScript Object
16 | const jsonData = JSON.parse(rawJsonData);
17 | const finalResponse = {}
18 | const transformedData = [];
19 | let total_cost = 0
20 | let spend_per_key = {}
21 | for (const [key, value] of Object.entries(jsonData._default)) {
22 | total_cost += value.total_cost
23 | if (!spend_per_key.hasOwnProperty(value.request_key)) {
24 | // Create a new key-value pair
25 | let new_key = value.request_key
26 | spend_per_key[new_key] = value.total_cost;
27 | } else {
28 | let new_key = value.request_key
29 | spend_per_key[new_key] += value.total_cost;
30 | }
31 |
32 | let date = new Date(value.created_at*1000);
33 |
34 | // Format the date into YYYY-MM-DD
35 | let formattedDate = `${date.getFullYear()}-${('0' + (date.getMonth()+1)).slice(-2)}-${('0' + date.getDate()).slice(-2)}`;
36 |
37 | // Extract the hours, minutes, and day of the week from the date
38 | let hours = date.getHours();
39 | let minutes = date.getMinutes();
40 | let dayOfWeek = date.getDay();
41 |
42 | // Find index of specific object
43 | var foundIndex = transformedData.findIndex(x => x.time == formattedDate && x.hours == hours && x.minutes == minutes && x.dayOfWeek == dayOfWeek);
44 |
45 | if (foundIndex === -1) {
46 | transformedData.push({
47 | time: formattedDate,
48 | hours: hours,
49 | minutes: minutes,
50 | dayOfWeek: dayOfWeek,
51 | 'number of requests': 1 // logging for each request in our logs
52 | });
53 | } else {
54 | transformedData[foundIndex]['number of requests']++;
55 | }
56 | }
57 |
58 |
59 |
60 | console.log("transformedData: ", transformedData)
61 | finalResponse["daily_requests"] = transformedData
62 | finalResponse["total_cost"] = total_cost
63 | finalResponse["spend_per_key"] = spend_per_key
64 | finalResponse["logs"] = Object.values(jsonData._default);
65 | console.log("finalResponse: ", finalResponse)
66 | // Return the processed data in the API response
67 | return NextResponse.json(finalResponse)
68 |
69 | } catch (err) {
70 | console.error("Failed to load or process file: ", err);
71 | return new Response('Error reading data', { status: 500 })
72 | }
73 | }
--------------------------------------------------------------------------------
/admin-dashboard/src/app/components/Logs.js:
--------------------------------------------------------------------------------
1 | import { StatusOnlineIcon } from "@heroicons/react/outline";
2 | import {
3 | Card,
4 | Table,
5 | TableHead,
6 | TableRow,
7 | TableHeaderCell,
8 | TableBody,
9 | TableCell,
10 | Text,
11 | Title,
12 | Badge,
13 | } from "@tremor/react";
14 |
15 | const data = [
16 | {
17 | name: "Viola Amherd",
18 | Role: "Federal Councillor",
19 | departement: "The Federal Department of Defence, Civil Protection and Sport (DDPS)",
20 | status: "active",
21 | },
22 | {
23 | name: "Simonetta Sommaruga",
24 | Role: "Federal Councillor",
25 | departement:
26 | "The Federal Department of the Environment, Transport, Energy and Communications (DETEC)",
27 | status: "active",
28 | },
29 | {
30 | name: "Alain Berset",
31 | Role: "Federal Councillor",
32 | departement: "The Federal Department of Home Affairs (FDHA)",
33 | status: "active",
34 | },
35 | {
36 | name: "Ignazio Cassis",
37 | Role: "Federal Councillor",
38 | departement: "The Federal Department of Foreign Affairs (FDFA)",
39 | status: "active",
40 | },
41 | {
42 | name: "Ueli Maurer",
43 | Role: "Federal Councillor",
44 | departement: "The Federal Department of Finance (FDF)",
45 | status: "active",
46 | },
47 | {
48 | name: "Guy Parmelin",
49 | Role: "Federal Councillor",
50 | departement: "The Federal Department of Economic Affairs, Education and Research (EAER)",
51 | status: "active",
52 | },
53 | {
54 | name: "Karin Keller-Sutter",
55 | Role: "Federal Councillor",
56 | departement: "The Federal Department of Justice and Police (FDJP)",
57 | status: "active",
58 | },
59 | ];
60 |
61 | export default (props) => {
62 | console.log("log props: ", props.data)
63 | return (
64 |
65 | Request Logs
66 |
67 |
68 |
69 | model
70 | request
71 | response
72 | cost per query
73 | response time
74 | project key
75 |
76 |
77 |
78 | {props.data && typeof props.data === 'object' &&
79 | Object.entries(props.data).map(([key, value]) => (
80 |
81 | {console.log(value)}
82 | {value.model}
83 | {value.messages.map(item => item.content).join(' ')}
84 |
85 | {value.response.substring(0,50)}
86 |
87 |
88 | {value.total_cost.toFixed(5)}
89 |
90 |
91 | {value.response_time}
92 |
93 |
94 | {value.request_key}
95 |
96 |
97 | ))}
98 |
99 |
100 |
101 | )};
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 | import traceback
2 | from flask import Flask, request, jsonify, abort, Response
3 | from flask_cors import CORS
4 |
5 | import litellm
6 | import threading
7 | from litellm import completion
8 | from utils import handle_error, get_cache, add_cache, Logging
9 | import os, dotenv, time
10 | import json
11 | dotenv.load_dotenv()
12 |
13 | # TODO: set your keys in .env or here:
14 | # os.environ["OPENAI_API_KEY"] = "" # set your openai key here
15 | # see supported models / keys here: https://litellm.readthedocs.io/en/latest/supported/
16 | ######### ENVIRONMENT VARIABLES ##########
17 | verbose = True
18 |
19 | ############ HELPER FUNCTIONS ###################################
20 |
21 | def print_verbose(print_statement):
22 | if verbose:
23 | print(print_statement)
24 |
25 | ######### LOGGING ###################
26 | # # log your data to slack, supabase
27 | successful_callbacks = ["tinydb"]
28 |
29 | ######### ERROR MONITORING ##########
30 | # log errors to slack, sentry, supabase
31 | # litellm.failure_callback=["slack", "sentry", "supabase"] # .env SENTRY_API_URL
32 | failure_callbacks = ["tinydb", "sentry"]
33 |
34 |
35 |
36 | request_logging = Logging(successful_callbacks=successful_callbacks, failure_callbacks=failure_callbacks, verbose=verbose)
37 |
38 | app = Flask(__name__)
39 | CORS(app)
40 |
41 | @app.route('/')
42 | def index():
43 | return 'received!', 200
44 |
45 | def data_generator(response):
46 | for chunk in response:
47 | yield f"data: {json.dumps(chunk)}\n\n"
48 |
49 | @app.route('/chat/completions', methods=["POST"])
50 | def api_completion():
51 | data = request.json
52 | start_time = time.time()
53 | if data.get('stream') == "True":
54 | data['stream'] = True # convert to boolean
55 | try:
56 | ## User-based rate-limiting
57 | ### Check if user id passed in
58 | ### if so -> check key + user combination - if it's a miss, get the user's current status from the db
59 | ### Key based limits
60 | ## Check if key has quota -> check in hot-cache, if it's a miss, get it from the db for the next call
61 | ## LOGGING
62 | request_logging.on_request_start(data)
63 | # COMPLETION CALL
64 | print(f"data: {data}")
65 | response = completion(**data)
66 | print_verbose(f"Got Response: {response}")
67 | ## LOG SUCCESS
68 | end_time = time.time()
69 | threading.Thread(target=request_logging.on_request_success, args=(data, request.headers.get('Authorization'), response, start_time, end_time)).start()
70 | if 'stream' in data and data['stream'] == True: # use generate_responses to stream responses
71 | return Response(data_generator(response), mimetype='text/event-stream')
72 | except Exception as e:
73 | # call handle_error function
74 | print_verbose(f"Got Error api_completion(): {traceback.format_exc()}")
75 | ## LOG FAILURE
76 | end_time = time.time()
77 | traceback_exception = traceback.format_exc()
78 | request_logging.on_request_failure(e, traceback_exception, data, request.headers.get('Authorization'), start_time, end_time) # don't do this threaded - else sentry's capture exception will save the wrong input params (since we're doing model fallbacks)
79 | # raise e
80 | return handle_error(data, request_logging=request_logging, auth_headers=request.headers.get('Authorization'), start_time=start_time)
81 |
82 | print_verbose(f"final response: {response}")
83 | print_verbose(f"type of final response: {type(response)}")
84 | return response
85 | @app.route('/get_models', methods=["POST"])
86 | def get_models():
87 | try:
88 | return litellm.model_list
89 | except Exception as e:
90 | traceback.print_exc()
91 | response = {"error": str(e)}
92 | return response, 200
93 |
94 | if __name__ == "__main__":
95 | from waitress import serve
96 | serve(app, host="0.0.0.0", port=4000, threads=500)
97 |
98 | # ############ Caching ###################################
99 | # # make a new endpoint with caching
100 | # # This Cache is built using ChromaDB
101 | # # it has two functions add_cache() and get_cache()
102 | # @app.route('/chat/completions_with_cache', methods=["POST"])
103 | # def api_completion_with_cache():
104 | # data = request.json
105 | # try:
106 | # cache_response = get_cache(data['messages'])
107 | # if cache_response!=None:
108 | # return cache_response
109 | # # pass in data to completion function, unpack data
110 | # response = completion(**data)
111 |
112 | # # add to cache
113 | # except Exception as e:
114 | # # call handle_error function
115 | # return handle_error(data)
116 | # return response, 200
117 |
118 |
--------------------------------------------------------------------------------
/admin-dashboard/tailwind.config.js:
--------------------------------------------------------------------------------
1 | /** @type {import('tailwindcss').Config} */
2 | /* eslint-disable max-len */
3 | module.exports = {
4 | content: [
5 | "./src/**/*.{js,ts,jsx,tsx}",
6 | "./node_modules/@tremor/**/*.{js,ts,jsx,tsx}",
7 | ],
8 | theme: {
9 | transparent: "transparent",
10 | current: "currentColor",
11 | extend: {
12 | colors: {
13 | // light mode
14 | tremor: {
15 | brand: {
16 | faint: "#eff6ff", // blue-50
17 | muted: "#bfdbfe", // blue-200
18 | subtle: "#60a5fa", // blue-400
19 | DEFAULT: "#3b82f6", // blue-500
20 | emphasis: "#1d4ed8", // blue-700
21 | inverted: "#ffffff", // white
22 | },
23 | background: {
24 | muted: "#f9fafb", // gray-50
25 | subtle: "#f3f4f6", // gray-100
26 | DEFAULT: "#ffffff", // white
27 | emphasis: "#374151", // gray-700
28 | },
29 | border: {
30 | DEFAULT: "#e5e7eb", // gray-200
31 | },
32 | ring: {
33 | DEFAULT: "#e5e7eb", // gray-200
34 | },
35 | content: {
36 | subtle: "#9ca3af", // gray-400
37 | DEFAULT: "#6b7280", // gray-500
38 | emphasis: "#374151", // gray-700
39 | strong: "#111827", // gray-900
40 | inverted: "#ffffff", // white
41 | },
42 | },
43 | // dark mode
44 | "dark-tremor": {
45 | brand: {
46 | faint: "#0B1229", // custom
47 | muted: "#172554", // blue-950
48 | subtle: "#1e40af", // blue-800
49 | DEFAULT: "#3b82f6", // blue-500
50 | emphasis: "#60a5fa", // blue-400
51 | inverted: "#030712", // gray-950
52 | },
53 | background: {
54 | muted: "#131A2B", // custom
55 | subtle: "#1f2937", // gray-800
56 | DEFAULT: "#111827", // gray-900
57 | emphasis: "#d1d5db", // gray-300
58 | },
59 | border: {
60 | DEFAULT: "#1f2937", // gray-800
61 | },
62 | ring: {
63 | DEFAULT: "#1f2937", // gray-800
64 | },
65 | content: {
66 | subtle: "#4b5563", // gray-600
67 | DEFAULT: "#6b7280", // gray-600
68 | emphasis: "#e5e7eb", // gray-200
69 | strong: "#f9fafb", // gray-50
70 | inverted: "#000000", // black
71 | },
72 | },
73 | },
74 | boxShadow: {
75 | // light
76 | "tremor-input": "0 1px 2px 0 rgb(0 0 0 / 0.05)",
77 | "tremor-card": "0 1px 3px 0 rgb(0 0 0 / 0.1), 0 1px 2px -1px rgb(0 0 0 / 0.1)",
78 | "tremor-dropdown": "0 4px 6px -1px rgb(0 0 0 / 0.1), 0 2px 4px -2px rgb(0 0 0 / 0.1)",
79 | // dark
80 | "dark-tremor-input": "0 1px 2px 0 rgb(0 0 0 / 0.05)",
81 | "dark-tremor-card": "0 1px 3px 0 rgb(0 0 0 / 0.1), 0 1px 2px -1px rgb(0 0 0 / 0.1)",
82 | "dark-tremor-dropdown": "0 4px 6px -1px rgb(0 0 0 / 0.1), 0 2px 4px -2px rgb(0 0 0 / 0.1)",
83 | },
84 | borderRadius: {
85 | "tremor-small": "0.375rem",
86 | "tremor-default": "0.5rem",
87 | "tremor-full": "9999px",
88 | },
89 | fontSize: {
90 | "tremor-label": ["0.75rem"],
91 | "tremor-default": ["0.875rem", { lineHeight: "1.25rem" }],
92 | "tremor-title": ["1.125rem", { lineHeight: "1.75rem" }],
93 | "tremor-metric": ["1.875rem", { lineHeight: "2.25rem" }],
94 | },
95 | },
96 | },
97 | safelist: [
98 | {
99 | pattern:
100 | /^(bg-(?:slate|gray|zinc|neutral|stone|red|orange|amber|yellow|lime|green|emerald|teal|cyan|sky|blue|indigo|violet|purple|fuchsia|pink|rose)-(?:50|100|200|300|400|500|600|700|800|900|950))$/,
101 | variants: ["hover", "ui-selected"],
102 | },
103 | {
104 | pattern:
105 | /^(text-(?:slate|gray|zinc|neutral|stone|red|orange|amber|yellow|lime|green|emerald|teal|cyan|sky|blue|indigo|violet|purple|fuchsia|pink|rose)-(?:50|100|200|300|400|500|600|700|800|900|950))$/,
106 | variants: ["hover", "ui-selected"],
107 | },
108 | {
109 | pattern:
110 | /^(border-(?:slate|gray|zinc|neutral|stone|red|orange|amber|yellow|lime|green|emerald|teal|cyan|sky|blue|indigo|violet|purple|fuchsia|pink|rose)-(?:50|100|200|300|400|500|600|700|800|900|950))$/,
111 | variants: ["hover", "ui-selected"],
112 | },
113 | {
114 | pattern:
115 | /^(ring-(?:slate|gray|zinc|neutral|stone|red|orange|amber|yellow|lime|green|emerald|teal|cyan|sky|blue|indigo|violet|purple|fuchsia|pink|rose)-(?:50|100|200|300|400|500|600|700|800|900|950))$/,
116 | },
117 | {
118 | pattern:
119 | /^(stroke-(?:slate|gray|zinc|neutral|stone|red|orange|amber|yellow|lime|green|emerald|teal|cyan|sky|blue|indigo|violet|purple|fuchsia|pink|rose)-(?:50|100|200|300|400|500|600|700|800|900|950))$/,
120 | },
121 | {
122 | pattern:
123 | /^(fill-(?:slate|gray|zinc|neutral|stone|red|orange|amber|yellow|lime|green|emerald|teal|cyan|sky|blue|indigo|violet|purple|fuchsia|pink|rose)-(?:50|100|200|300|400|500|600|700|800|900|950))$/,
124 | },
125 | ],
126 | plugins: [require("@headlessui/tailwindcss")],
127 | };
128 |
--------------------------------------------------------------------------------
/readme.md:
--------------------------------------------------------------------------------
1 |
2 | # liteLLM Proxy Server: 50+ LLM Models, Error Handling, Caching
3 | ### Azure, Llama2, OpenAI, Claude, Hugging Face, Replicate Models
4 | [](https://pypi.org/project/litellm/)
5 | [](https://pypi.org/project/litellm/0.1.1/)
6 | 
7 | [](https://github.com/BerriAI/litellm)
8 |
9 | [](https://railway.app/template/DYqQAW?referralCode=t3ukrU)
10 |
11 | ## What does liteLLM proxy do
12 | - Make `/chat/completions` requests for 50+ LLM models: **Azure, OpenAI, Replicate, Anthropic, Hugging Face**
13 |
14 | Example: for `model` use `claude-2`, `gpt-3.5`, `gpt-4`, `command-nightly`, `stabilityai/stablecode-completion-alpha-3b-4k`
15 | ```json
16 | {
17 | "model": "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1",
18 | "messages": [
19 | {
20 | "content": "Hello, whats the weather in San Francisco??",
21 | "role": "user"
22 | }
23 | ]
24 | }
25 | ```
26 | - **Consistent Input/Output** Format
27 | - Call all models using the OpenAI format - `completion(model, messages)`
28 | - Text responses will always be available at `['choices'][0]['message']['content']`
29 | - **Error Handling** Using Model Fallbacks (if `GPT-4` fails, try `llama2`)
30 | - **Logging** - Log Requests, Responses and Errors to `Supabase`, `Posthog`, `Mixpanel`, `Sentry`, `Helicone` (any of the supported providers here: https://litellm.readthedocs.io/en/latest/advanced/)
31 |
32 | **Example: Logs sent to Supabase**
33 |
34 |
35 | - **Token Usage & Spend** - Track Input + Completion tokens used + Spend/model
36 | - **Caching** - Implementation of Semantic Caching
37 | - **Streaming & Async Support** - Return generators to stream text responses (see the client sketch below)
38 |
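The proxy streams responses as server-sent events (`data: {...}` chunks over `text/event-stream` - see `data_generator` in `main.py`). A minimal client sketch, assuming the proxy is running locally on port 4000:

```python
import json
import requests

url = "http://localhost:4000/chat/completions"
payload = {
    "model": "gpt-3.5-turbo",
    "messages": [{"role": "user", "content": "write a 1 pg essay in liteLLM"}],
    "stream": True,
}

# iterate over the event stream and parse each `data: ...` chunk
with requests.post(url, json=payload, stream=True) as resp:
    for line in resp.iter_lines():
        if line.startswith(b"data: "):
            print(json.loads(line[len(b"data: "):]))
```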
39 |
40 | ## API Endpoints
41 |
42 | ### `/chat/completions` (POST)
43 |
44 | This endpoint generates chat completions for 50+ supported LLM API models, e.g. llama2, GPT-4, Claude 2.
45 |
46 | #### Input
47 | This API endpoint accepts a raw JSON body with the following inputs:
48 | - `model` (string, required): ID of the model to use for chat completions. See all supported models [here](https://litellm.readthedocs.io/en/latest/supported/),
49 | eg `gpt-3.5-turbo`, `gpt-4`, `claude-2`, `command-nightly`, `stabilityai/stablecode-completion-alpha-3b-4k`
50 | - `messages` (array, required): A list of messages representing the conversation context. Each message should have a `role` (system, user, assistant, or function), `content` (message text), and `name` (for function role).
51 | - Additional Optional parameters: `temperature`, `functions`, `function_call`, `top_p`, `n`, `stream`. See the full list of supported inputs here: https://litellm.readthedocs.io/en/latest/input/
52 |
53 |
54 | #### Example JSON body
55 | For claude-2
56 | ```json
57 | {
58 | "model": "claude-2",
59 | "messages": [
60 | {
61 | "content": "Hello, whats the weather in San Francisco??",
62 | "role": "user"
63 | }
64 | ]
65 |
66 | }
67 | ```
68 |
69 | ### Making an API request to the Proxy Server
70 | ```python
71 | import requests
72 | import json
73 |
74 | # TODO: use your URL
75 | url = "http://localhost:5000/chat/completions"
76 |
77 | payload = json.dumps({
78 | "model": "gpt-3.5-turbo",
79 | "messages": [
80 | {
81 | "content": "Hello, whats the weather in San Francisco??",
82 | "role": "user"
83 | }
84 | ]
85 | })
86 | headers = {
87 | 'Content-Type': 'application/json'
88 | }
89 | response = requests.request("POST", url, headers=headers, data=payload)
90 | print(response.text)
91 |
92 | ```
93 |
94 | ### Output [Response Format]
95 | All responses from the server are returned in the following format, for all LLM models.
96 | More info on output here: https://litellm.readthedocs.io/en/latest/output/
97 | ```json
98 | {
99 | "choices": [
100 | {
101 | "finish_reason": "stop",
102 | "index": 0,
103 | "message": {
104 | "content": "I'm sorry, but I don't have the capability to provide real-time weather information. However, you can easily check the weather in San Francisco by searching online or using a weather app on your phone.",
105 | "role": "assistant"
106 | }
107 | }
108 | ],
109 | "created": 1691790381,
110 | "id": "chatcmpl-7mUFZlOEgdohHRDx2UpYPRTejirzb",
111 | "model": "gpt-3.5-turbo-0613",
112 | "object": "chat.completion",
113 | "usage": {
114 | "completion_tokens": 41,
115 | "prompt_tokens": 16,
116 | "total_tokens": 57
117 | }
118 | }
119 | ```
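Since the format is identical across models, the completion text can always be read from the same path. Continuing the `requests` example above (a small sketch):

```python
text = response.json()["choices"][0]["message"]["content"]
print(text)
```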
120 |
121 | ## Installation & Usage
122 | ### Running Locally
123 | 1. Clone liteLLM repository to your local machine:
124 | ```
125 | git clone https://github.com/BerriAI/liteLLM-proxy
126 | ```
127 | 2. Install the required dependencies using pip
128 | ```
129 | pip install -r requirements.txt
130 | ```
131 | 3. Set your LLM API keys (see the sample `.env` after these steps)
132 | ```
133 | os.environ['OPENAI_API_KEY'] = "YOUR_API_KEY"
134 | or
135 | set OPENAI_API_KEY in your .env file
136 | ```
137 | 4. Run the server:
138 | ```
139 | python main.py
140 | ```
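A sample `.env` for local use (set only the keys for the providers you call; `SENTRY_API_URL` is the Sentry DSN read by `integrations/sentry.py` and is only needed for the Sentry failure callback):

```
OPENAI_API_KEY="YOUR_API_KEY"
ANTHROPIC_API_KEY="YOUR_API_KEY"
COHERE_API_KEY="YOUR_API_KEY"
SENTRY_API_URL="YOUR_SENTRY_DSN"
```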
141 |
142 |
143 |
144 | ## Deploying
145 | 1. Quick Start: Deploy on Railway
146 |
147 | [](https://railway.app/template/DYqQAW?referralCode=t3ukrU)
148 |
149 | 2. `GCP`, `AWS`, `Azure`
150 | This project includes a `Dockerfile`, so you can build and deploy a Docker image on any of these providers (see the example below).
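For example, to build and run the container locally (`litellm-proxy` is a hypothetical image name; the server listens on port 4000 per `main.py`):

```
docker build -t litellm-proxy .
docker run -p 4000:4000 litellm-proxy
```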
151 |
152 |
153 |
154 |
155 |
156 |
--------------------------------------------------------------------------------
/integrations/tinydb.py:
--------------------------------------------------------------------------------
1 | #### What this does ####
2 | # Log events to a local db - TinyDB, for easy cost tracking
3 |
4 | import dotenv, os
5 | import requests
6 | dotenv.load_dotenv() # Loading env variables using dotenv
7 | import traceback
8 | import datetime, subprocess, sys
9 |
10 | model_cost = {
11 | "gpt-3.5-turbo": {"max_tokens": 4000, "input_cost_per_token": 0.0000015, "output_cost_per_token": 0.000002},
12 | "gpt-35-turbo": {"max_tokens": 4000, "input_cost_per_token": 0.0000015, "output_cost_per_token": 0.000002}, # azure model name
13 | "gpt-3.5-turbo-0613": {"max_tokens": 4000, "input_cost_per_token": 0.0000015, "output_cost_per_token": 0.000002},
14 | "gpt-3.5-turbo-0301": {"max_tokens": 4000, "input_cost_per_token": 0.0000015, "output_cost_per_token": 0.000002},
15 | "gpt-3.5-turbo-16k": {"max_tokens": 16000, "input_cost_per_token": 0.000003, "output_cost_per_token": 0.000004},
16 | "gpt-35-turbo-16k": {"max_tokens": 16000, "input_cost_per_token": 0.000003, "output_cost_per_token": 0.000004}, # azure model name
17 | "gpt-3.5-turbo-16k-0613": {"max_tokens": 16000, "input_cost_per_token": 0.000003, "output_cost_per_token": 0.000004},
18 | "gpt-4": {"max_tokens": 8000, "input_cost_per_token": 0.000003, "output_cost_per_token": 0.00006},
19 | "gpt-4-0613": {"max_tokens": 8000, "input_cost_per_token": 0.000003, "output_cost_per_token": 0.00006},
20 | "gpt-4-32k": {"max_tokens": 8000, "input_cost_per_token": 0.00006, "output_cost_per_token": 0.00012},
21 | "claude-instant-1": {"max_tokens": 100000, "input_cost_per_token": 0.00000163, "output_cost_per_token": 0.00000551},
22 | "claude-2": {"max_tokens": 100000, "input_cost_per_token": 0.00001102, "output_cost_per_token": 0.00003268},
23 | "text-bison-001": {"max_tokens": 8192, "input_cost_per_token": 0.000004, "output_cost_per_token": 0.000004},
24 | "chat-bison-001": {"max_tokens": 4096, "input_cost_per_token": 0.000002, "output_cost_per_token": 0.000002},
25 | "command-nightly": {"max_tokens": 4096, "input_cost_per_token": 0.000015, "output_cost_per_token": 0.000015},
26 | }
27 |
28 | class TinyDB:
29 | # Class variables or attributes
30 | tinydb_table_name = "request_logs"
31 | def __init__(self):
32 | # Instance variables
33 | try:
34 | import tinydb
35 | from tinydb import TinyDB
36 | except ImportError:
37 | subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'tinydb'])
38 | from tinydb import TinyDB
39 | self.db = TinyDB(f'{self.tinydb_table_name}.json')
40 |
41 | def price_calculator(self, model, response_obj, start_time, end_time):
42 | # try and find if the model is in the model_cost map
43 | # else default to the average of the costs
44 | prompt_tokens_cost_usd_dollar = 0
45 | completion_tokens_cost_usd_dollar = 0
46 | if model in model_cost:
47 | prompt_tokens_cost_usd_dollar = model_cost[model]["input_cost_per_token"] * response_obj["usage"]["prompt_tokens"]
48 | completion_tokens_cost_usd_dollar = model_cost[model]["output_cost_per_token"] * response_obj["usage"]["completion_tokens"]
49 | elif "replicate" in model:
50 | # replicate models are charged based on time
51 | # llama 2 runs on an nvidia a100 which costs $0.0032 per second - https://replicate.com/replicate/llama-2-70b-chat
52 | model_run_time = end_time - start_time # assuming time in seconds
53 | cost_usd_dollar = model_run_time * 0.0032
54 | prompt_tokens_cost_usd_dollar = cost_usd_dollar / 2
55 | completion_tokens_cost_usd_dollar = cost_usd_dollar / 2
56 | else:
57 | # calculate average input cost
58 | input_cost_sum = 0
59 | output_cost_sum = 0
60 | for m in model_cost: # don't shadow the `model` parameter
61 | input_cost_sum += model_cost[m]["input_cost_per_token"]
62 | output_cost_sum += model_cost[m]["output_cost_per_token"]
63 | avg_input_cost = input_cost_sum / len(model_cost.keys())
64 | avg_output_cost = output_cost_sum / len(model_cost.keys())
65 | prompt_tokens_cost_usd_dollar = avg_input_cost * response_obj["usage"]["prompt_tokens"]
66 | completion_tokens_cost_usd_dollar = avg_output_cost * response_obj["usage"]["completion_tokens"]
67 | return prompt_tokens_cost_usd_dollar, completion_tokens_cost_usd_dollar
68 |
69 | def log_event(self, model, messages, user, request_key, response_obj, start_time, end_time, print_verbose):
70 | try:
71 | print_verbose(f"TinyDB Logging - Enters logging function for model {model}, response_obj: {response_obj}", level=2)
72 |
73 | prompt_tokens_cost_usd_dollar, completion_tokens_cost_usd_dollar = self.price_calculator(model, response_obj, start_time, end_time)
74 | total_cost = prompt_tokens_cost_usd_dollar + completion_tokens_cost_usd_dollar
75 |
76 | response_time = end_time-start_time
77 | if "choices" in response_obj:
78 | tinydb_data_obj = {
79 | "request_key": request_key.replace("Bearer ", ""),
80 | "response_time": response_time,
81 | "user": user,
82 | "created_at": response_obj["created"],
83 | "model": response_obj["model"],
84 | "total_cost": total_cost,
85 | "messages": messages,
86 | "response": response_obj['choices'][0]['message']['content']
87 | }
88 | print_verbose(f"TinyDB Logging - final data object: {tinydb_data_obj}", level=2)
89 | self.db.insert(tinydb_data_obj)
90 | elif "error" in response_obj:
91 | tinydb_data_obj = {
92 | "response_time": response_time,
93 | "user": user,
94 | "created_at": response_obj["created"],
95 | "model": response_obj["model"],
96 | "total_cost": total_cost,
97 | "messages": messages,
98 | "error": response_obj['error']
99 | }
100 | print_verbose(f"TinyDB Logging - final data object: {tinydb_data_obj}", level=2)
101 | self.db.insert(tinydb_data_obj)
102 |
103 | except:
104 | traceback.print_exc()
105 | print_verbose(f"TinyDB Logging Error - {traceback.format_exc()}", level=2)
106 | pass
107 |
--------------------------------------------------------------------------------
/utils.py:
--------------------------------------------------------------------------------
1 | import time, random, sys, subprocess, threading
2 | from copy import deepcopy
3 | import litellm
4 | from litellm import completion, embedding
5 | import os, dotenv, traceback
6 | import json
7 | dotenv.load_dotenv()
8 | from integrations.tinydb import TinyDB
9 | from integrations.sentry import Sentry
10 | ######### ENVIRONMENT VARIABLES ##########
11 | callback_list = []
12 | tinyDBClient = None
13 | backup_keys = {key: "" for key in litellm.provider_list}
14 | for key in backup_keys:
15 | if key == "openai":
16 | backup_keys[key] = os.getenv("OPENAI_BACKUP_API_KEY")
17 | elif key == "cohere":
18 | backup_keys[key] = os.getenv("COHERE_BACKUP_API_KEY")
19 | elif key == "anthropic":
20 | backup_keys[key] = os.getenv("ANTHROPIC_BACKUP_API_KEY")
21 | elif key == "replicate":
22 | backup_keys[key] = os.getenv("REPLICATE_BACKUP_API_KEY")
23 | elif key == "huggingface":
24 | backup_keys[key] = os.getenv("HUGGINGFACE_BACKUP_API_KEY")
25 | elif key == "together_ai":
26 | backup_keys[key] = os.getenv("TOGETHERAI_BACKUP_API_KEY")
27 | elif key == "vertex_ai":
28 | backup_keys[key] = os.getenv("VERTEXAI_BACKUP_API_KEY")
29 | elif key == "ai21":
30 | backup_keys[key] = os.getenv("AI21_BACKUP_API_KEY")
31 | ########### streaming ############################
32 | def generate_responses(response):
33 | for chunk in response:
34 | yield json.dumps({"response": chunk}) + "\n"
35 |
36 | ################ ERROR HANDLING #####################
37 | # implement model fallbacks, cooldowns, and retries
38 | # if a model fails assume it was rate limited and let it cooldown for 60s
39 | def handle_error(data, request_logging, auth_headers, start_time):
40 | # retry completion() request with fallback models
41 | response = None
42 | data.pop("model")
43 | rate_limited_models = set()
44 | model_expiration_times = {}
45 | fallback_strategy=['claude-instant-1', 'gpt-3.5-turbo', 'command-nightly']
46 | for model in fallback_strategy:
47 | response = None
48 | attempt = 0
49 | new_data = deepcopy(data)
50 | execution_complete = False
51 | for attempt in range(2):
52 | try:
53 | if model in rate_limited_models: # check if model is currently cooling down
54 | if model_expiration_times.get(model) and time.time() >= model_expiration_times[model]:
55 | rate_limited_models.remove(model) # check if it's been 60s of cool down and remove model
56 | else:
57 | continue # skip model
58 | ## PREPARE FOR CALL
59 | if isinstance(model, str):
60 | new_data["model"] = model
61 | elif isinstance(model, dict):
62 | new_data["model"] = model["model"]
63 | new_data["custom_llm_provider"] = model["custom_llm_provider"] if "custom_llm_provider" in model else None
64 | new_data["custom_api_base"] = model["custom_api_base"] if "custom_api_base" in model else None
65 | print("model type: ", type(model))
66 | print(f"new_data[model]: {new_data['model']}")
67 | ## COMPLETION CALL
68 | response = completion(**new_data)
69 | except Exception as e:
70 | print(f"Got Error handle_error(): {e}")
71 | end_time = time.time()
72 | traceback_exception = traceback.format_exc()
73 | request_logging.on_request_failure(e, traceback_exception, data, auth_headers, start_time, end_time) # don't do this threaded - else sentry's capture exception will save the wrong input params (since we're doing model fallbacks)
74 | error_type = type(e).__name__
75 | print(f"error_type handle_error(): {error_type}")
76 | llm_provider = e.llm_provider
77 | if "AuthenticationError" in error_type and attempt < 1: # don't retry twice with a bad model key
78 | print(f"handle_error() - Input new_data: {new_data} \n Environment Variables: {os.environ}")
79 | # switch to the next key
80 | new_data["api_key"] = backup_keys[llm_provider] # dynamically set the backup key - litellm checks this before checking os.environ - https://github.com/BerriAI/litellm/blob/cff26b1d08ba240dcecea7df78a7833990336e6b/litellm/main.py#L112
81 | elif attempt == 0: # wait a random period before retrying
82 | wait_time = random.randint(1, 10)
83 | time.sleep(wait_time)
84 | elif attempt == 1: # final attempt failed - assume rate-limited and cool down for 60s
85 | rate_limited_models.add(model)
86 | model_expiration_times[model] = time.time() + 60
87 | if response != None:
88 | break
89 | if response != None:
90 | end_time = time.time()
91 | ## LOGGING SUCCESS
92 | threading.Thread(target=request_logging.on_request_success, args=(new_data, auth_headers, response, start_time, end_time)).start() # don't block execution of main thread
93 | break
94 | return response
95 |
96 |
97 | ########### Pricing is tracked in Supabase ############
98 |
99 |
100 |
101 | import uuid
102 | cache_collection = None
103 | # Add a response to the cache
104 | def add_cache(messages, model_response):
105 | global cache_collection
106 | if cache_collection is None:
107 | make_collection()
108 |
109 | user_question = message_to_user_question(messages)
110 |
111 | # Add the user question and model response to the cache
112 | cache_collection.add(
113 | documents=[user_question],
114 | metadatas=[{"model_response": str(model_response)}],
115 | ids=[str(uuid.uuid4())]
116 | )
117 | return
118 |
119 | # Retrieve a response from the cache if similarity is above the threshold
120 | def get_cache(messages, similarity_threshold):
121 | try:
122 | global cache_collection
123 | if cache_collection is None:
124 | make_collection()
125 |
126 | user_question = message_to_user_question(messages)
127 |
128 | # Query the cache for the user question
129 | results = cache_collection.query(
130 | query_texts=[user_question],
131 | n_results=1
132 | )
133 |
134 | if len(results['distances'][0]) == 0:
135 | return None # Cache is empty
136 |
137 | distance = results['distances'][0][0]
138 | sim = (1 - distance)
139 |
140 | if sim >= similarity_threshold:
141 | return results['metadatas'][0][0]["model_response"] # Return cached response
142 | else:
143 | return None # No cache hit
144 | except Exception as e:
145 | print("Error in get cache", e)
146 | raise e
147 |
148 | # Initialize the cache collection
149 | def make_collection():
150 | import chromadb
151 | global cache_collection
152 | client = chromadb.Client()
153 | cache_collection = client.create_collection("llm_responses")
154 |
155 | # HELPER: Extract user's question from messages
156 | def message_to_user_question(messages):
157 | user_question = ""
158 | for message in messages:
159 | if message['role'] == 'user':
160 | user_question += message["content"]
161 | return user_question
162 |
163 |
164 | class Logging:
165 | def __init__(self, successful_callbacks, failure_callbacks, verbose, verbose_level=1):
166 | # Constructor
167 | self.verbose = verbose
168 | self.verbose_level = verbose_level
169 | self.successful_callbacks = successful_callbacks
170 | self.failure_callbacks = failure_callbacks
171 | self.callback_list = list(set(successful_callbacks + failure_callbacks))
172 | self.tinyDBClient = None
173 | self.sentryClient = None
174 | self.init_callbacks()
175 |
176 | def print_verbose(self, print_statement, level):
177 | if self.verbose and self.verbose_level == level:
178 | print(print_statement)
179 |
180 | def init_callbacks(self):
181 | for callback in self.callback_list:
182 | if callback == "tinydb":
183 | self.tinyDBClient = TinyDB()
184 | if callback == "sentry":
185 | self.sentryClient = Sentry()
186 |
187 |
188 | def on_request_start(self, data):
189 | # Any logging to be done before function is executed - Non-blocking
190 | try:
191 | if self.sentryClient:
192 | self.sentryClient.add_breadcrumb(
193 | category="litellm.proxy.llm_call",
194 | message=f"Input Data: {data} \n Environment Variables: {os.environ}",
195 | level="info",
196 | )
197 | pass
198 | except:
199 | traceback.print_exc()
200 | self.print_verbose(f"Got Error on_request_start: {traceback.format_exc()}", level=1)
201 |
202 | def on_request_success(self, data, request_key, result, start_time, end_time):
203 | # log event on success - Non-blocking
204 | try:
205 | for callback in self.successful_callbacks:
206 | if callback == "tinydb":
207 | model = data["model"]
208 | messages = data["messages"]
209 | user = data["user"] if "user" in data else None
210 |
211 | self.tinyDBClient.log_event(model=model, messages=messages, user=user, request_key=request_key, response_obj = result, start_time=start_time, end_time=end_time, print_verbose=self.print_verbose)
212 | except:
213 | traceback.print_exc()
214 | self.print_verbose(f"Got Error on_request_success: {traceback.format_exc()}", level=1)
215 | pass
216 |
217 | def on_request_failure(self, exception, traceback_exception, data, request_key, start_time, end_time):
218 | # log event on failure - Non-blocking
219 | try:
220 | self.print_verbose(f"failure callbacks: {self.failure_callbacks}", level=2)
221 | for callback in self.failure_callbacks:
222 | if callback == "tinydb":
223 | model = data["model"]
224 | messages = data["messages"]
225 |
226 | user = data["user"] if "user" in data else None
227 | result = {
228 | "model": model,
229 | "created": time.time(),
230 | "error": traceback_exception,
231 | "usage": {
232 | "prompt_tokens": litellm.token_counter(model, text=" ".join(message["content"] for message in messages)),
233 | "completion_tokens": 0
234 | }
235 | }
236 | self.tinyDBClient.log_event(model=model, messages=messages, user=user, request_key=request_key, response_obj = result, start_time=start_time, end_time=end_time, print_verbose=self.print_verbose)
237 | if callback == "sentry":
238 | self.sentryClient.capture_exception(exception)
239 | except:
240 | self.print_verbose(f"Got Error on_request_failure: {traceback.format_exc()}", level=1)
241 | pass
--------------------------------------------------------------------------------
/request_logs.json:
--------------------------------------------------------------------------------
1 | {"_default": {"1": {"request_key": "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", "response_time": 1.0259308815002441, "user": "krrish@berri.ai", "created_at": 1692328106, "model": "gpt-3.5-turbo-0613", "total_cost": 6.35e-05, "messages": [{"role": "user", "content": "write a 1 pg essay in liteLLM"}], "response": "Clarifying the requested language: Is it \"liteLLM\" or \"Lite LLM\"?"}, "2": {"request_key": "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", "response_time": 0.909782886505127, "user": "ishaan@berri.ai", "created_at": 1692328108.1154199, "model": "command-nightly", "total_cost": 0.00045000000000000004, "messages": [{"role": "user", "content": "write a 1 pg essay in liteLLM"}], "response": " LiteLLM is a language model that can be used to generate text based on a given prompt."}, "3": {"response_time": 0.006799936294555664, "user": "krrish@berri.ai", "created_at": 1692365843.349687, "model": "claude-instant-1", "total_cost": 1.63e-05, "messages": [{"role": "user", "content": "write a 1 pg essay in liteLLM"}], "error": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/Documents/liteLLM-proxy/main.py\", line 64, in api_completion\n response = completion(**data)\n ^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 243, in wrapper\n raise e\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 227, in wrapper\n result = original_function(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/timeout.py\", line 44, in wrapper\n result = future.result(timeout=local_timeout_duration)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/concurrent/futures/_base.py\", line 456, in result\n return self.__get_result()\n ^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/concurrent/futures/_base.py\", line 401, in __get_result\n raise self._exception\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/timeout.py\", line 35, in async_func\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/main.py\", line 462, in completion\n raise exception_type(model=model, custom_llm_provider=custom_llm_provider, original_exception=e)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 751, in exception_type\n raise e\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 688, in exception_type\n raise original_exception\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/main.py\", line 108, in completion\n response = openai.ChatCompletion.create(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/chat_completion.py\", line 25, in create\n return super().create(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File 
\"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/abstract/engine_api_resource.py\", line 149, in create\n ) = cls.__prepare_create_request(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/abstract/engine_api_resource.py\", line 106, in __prepare_create_request\n requestor = api_requestor.APIRequestor(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_requestor.py\", line 138, in __init__\n self.api_key = key or util.default_api_key()\n ^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/util.py\", line 186, in default_api_key\n raise openai.error.AuthenticationError(\nopenai.error.AuthenticationError: No API key provided. You can set your API key in code using 'openai.api_key = ', or you can set the environment variable OPENAI_API_KEY=). If your API key is stored in a file, you can point the openai module at it with 'openai.api_key_path = '. You can generate API keys in the OpenAI web interface. See https://platform.openai.com/account/api-keys for details.\n"}, "4": {"response_time": 0.004435062408447266, "user": "krrish@berri.ai", "created_at": 1692366062.7117639, "model": "gpt-3.5-turbo", "total_cost": 1.5e-05, "messages": [{"role": "user", "content": "write a 1 pg essay in liteLLM"}], "error": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/Documents/liteLLM-proxy/main.py\", line 64, in api_completion\n response = completion(**data)\n ^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 243, in wrapper\n raise e\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 227, in wrapper\n result = original_function(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/timeout.py\", line 44, in wrapper\n result = future.result(timeout=local_timeout_duration)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/concurrent/futures/_base.py\", line 456, in result\n return self.__get_result()\n ^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/concurrent/futures/_base.py\", line 401, in __get_result\n raise self._exception\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/timeout.py\", line 35, in async_func\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/main.py\", line 462, in completion\n raise exception_type(model=model, custom_llm_provider=custom_llm_provider, original_exception=e)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 751, in exception_type\n raise e\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 688, in exception_type\n raise original_exception\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/main.py\", line 
108, in completion\n response = openai.ChatCompletion.create(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/chat_completion.py\", line 25, in create\n return super().create(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/abstract/engine_api_resource.py\", line 149, in create\n ) = cls.__prepare_create_request(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/abstract/engine_api_resource.py\", line 106, in __prepare_create_request\n requestor = api_requestor.APIRequestor(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_requestor.py\", line 138, in __init__\n self.api_key = key or util.default_api_key()\n ^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/util.py\", line 186, in default_api_key\n raise openai.error.AuthenticationError(\nopenai.error.AuthenticationError: No API key provided. You can set your API key in code using 'openai.api_key = ', or you can set the environment variable OPENAI_API_KEY=). If your API key is stored in a file, you can point the openai module at it with 'openai.api_key_path = '. You can generate API keys in the OpenAI web interface. See https://platform.openai.com/account/api-keys for details.\n"}, "5": {"response_time": 0.0068759918212890625, "user": "krrish@berri.ai", "created_at": 1692366150.066541, "model": "claude-instant-1", "total_cost": 1.63e-05, "messages": [{"role": "user", "content": "write a 1 pg essay in liteLLM"}], "error": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/Documents/liteLLM-proxy/main.py\", line 65, in api_completion\n response = completion(**data)\n ^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 243, in wrapper\n raise e\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 227, in wrapper\n result = original_function(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/timeout.py\", line 44, in wrapper\n result = future.result(timeout=local_timeout_duration)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/concurrent/futures/_base.py\", line 456, in result\n return self.__get_result()\n ^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/concurrent/futures/_base.py\", line 401, in __get_result\n raise self._exception\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/timeout.py\", line 35, in async_func\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/main.py\", line 462, in completion\n raise exception_type(model=model, custom_llm_provider=custom_llm_provider, original_exception=e)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File 
\"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 751, in exception_type\n raise e\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 688, in exception_type\n raise original_exception\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/main.py\", line 108, in completion\n response = openai.ChatCompletion.create(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/chat_completion.py\", line 25, in create\n return super().create(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/abstract/engine_api_resource.py\", line 149, in create\n ) = cls.__prepare_create_request(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/abstract/engine_api_resource.py\", line 106, in __prepare_create_request\n requestor = api_requestor.APIRequestor(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_requestor.py\", line 138, in __init__\n self.api_key = key or util.default_api_key()\n ^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/util.py\", line 186, in default_api_key\n raise openai.error.AuthenticationError(\nopenai.error.AuthenticationError: No API key provided. You can set your API key in code using 'openai.api_key = ', or you can set the environment variable OPENAI_API_KEY=). If your API key is stored in a file, you can point the openai module at it with 'openai.api_key_path = '. You can generate API keys in the OpenAI web interface. 
See https://platform.openai.com/account/api-keys for details.\n"}, "6": {"response_time": 0.003229856491088867, "user": "krrish@berri.ai", "created_at": 1692366181.920657, "model": "claude-instant-1", "total_cost": 1.63e-05, "messages": [{"role": "user", "content": "write a 1 pg essay in liteLLM"}], "error": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/Documents/liteLLM-proxy/main.py\", line 65, in api_completion\n response = completion(**data)\n ^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 243, in wrapper\n raise e\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 227, in wrapper\n result = original_function(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/timeout.py\", line 44, in wrapper\n result = future.result(timeout=local_timeout_duration)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/concurrent/futures/_base.py\", line 456, in result\n return self.__get_result()\n ^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/concurrent/futures/_base.py\", line 401, in __get_result\n raise self._exception\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/timeout.py\", line 35, in async_func\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/main.py\", line 462, in completion\n raise exception_type(model=model, custom_llm_provider=custom_llm_provider, original_exception=e)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 751, in exception_type\n raise e\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 688, in exception_type\n raise original_exception\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/main.py\", line 108, in completion\n response = openai.ChatCompletion.create(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/chat_completion.py\", line 25, in create\n return super().create(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/abstract/engine_api_resource.py\", line 149, in create\n ) = cls.__prepare_create_request(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/abstract/engine_api_resource.py\", line 106, in __prepare_create_request\n requestor = api_requestor.APIRequestor(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_requestor.py\", line 138, in __init__\n self.api_key = key or util.default_api_key()\n ^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/util.py\", line 186, in default_api_key\n raise 
openai.error.AuthenticationError(\nopenai.error.AuthenticationError: No API key provided. You can set your API key in code using 'openai.api_key = <API-KEY>', or you can set the environment variable OPENAI_API_KEY=<API-KEY>). If your API key is stored in a file, you can point the openai module at it with 'openai.api_key_path = <PATH>'. You can generate API keys in the OpenAI web interface. See https://platform.openai.com/account/api-keys for details.\n"}, "7": {"response_time": 0.006825923919677734, "user": "krrish@berri.ai", "created_at": 1692366503.671454, "model": "gpt-3.5-turbo", "total_cost": 1.5e-05, "messages": [{"role": "user", "content": "write a 1 pg essay in liteLLM"}], "error": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/Documents/liteLLM-proxy/main.py\", line 65, in api_completion\n response = completion(**data)\n ^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 243, in wrapper\n raise e\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 227, in wrapper\n result = original_function(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/timeout.py\", line 44, in wrapper\n result = future.result(timeout=local_timeout_duration)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/concurrent/futures/_base.py\", line 456, in result\n return self.__get_result()\n ^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/concurrent/futures/_base.py\", line 401, in __get_result\n raise self._exception\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/timeout.py\", line 35, in async_func\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/main.py\", line 462, in completion\n raise exception_type(model=model, custom_llm_provider=custom_llm_provider, original_exception=e)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 751, in exception_type\n raise e\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 688, in exception_type\n raise original_exception\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/main.py\", line 108, in completion\n response = openai.ChatCompletion.create(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/chat_completion.py\", line 25, in create\n return super().create(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/abstract/engine_api_resource.py\", line 149, in create\n ) = cls.__prepare_create_request(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/abstract/engine_api_resource.py\", line 106, in __prepare_create_request\n requestor = api_requestor.APIRequestor(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File 
\"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_requestor.py\", line 138, in __init__\n self.api_key = key or util.default_api_key()\n ^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/util.py\", line 186, in default_api_key\n raise openai.error.AuthenticationError(\nopenai.error.AuthenticationError: No API key provided. You can set your API key in code using 'openai.api_key = <API-KEY>', or you can set the environment variable OPENAI_API_KEY=<API-KEY>). If your API key is stored in a file, you can point the openai module at it with 'openai.api_key_path = <PATH>'. You can generate API keys in the OpenAI web interface. See https://platform.openai.com/account/api-keys for details.\n"}, "8": {"response_time": 0.006394147872924805, "user": "krrish@berri.ai", "created_at": 1692367073.693057, "model": "gpt-3.5-turbo", "total_cost": 1.5e-05, "messages": [{"role": "user", "content": "write a 1 pg essay in liteLLM"}], "error": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/Documents/liteLLM-proxy/main.py\", line 65, in api_completion\n response = completion(**data)\n ^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 243, in wrapper\n raise e\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 227, in wrapper\n result = original_function(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/timeout.py\", line 44, in wrapper\n result = future.result(timeout=local_timeout_duration)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/concurrent/futures/_base.py\", line 456, in result\n return self.__get_result()\n ^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/concurrent/futures/_base.py\", line 401, in __get_result\n raise self._exception\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/timeout.py\", line 35, in async_func\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/main.py\", line 462, in completion\n raise exception_type(model=model, custom_llm_provider=custom_llm_provider, original_exception=e)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 751, in exception_type\n raise e\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 688, in exception_type\n raise original_exception\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/main.py\", line 108, in completion\n response = openai.ChatCompletion.create(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/chat_completion.py\", line 25, in create\n return super().create(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/abstract/engine_api_resource.py\", line 149, in create\n ) = 
cls.__prepare_create_request(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/abstract/engine_api_resource.py\", line 106, in __prepare_create_request\n requestor = api_requestor.APIRequestor(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_requestor.py\", line 138, in __init__\n self.api_key = key or util.default_api_key()\n ^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/util.py\", line 186, in default_api_key\n raise openai.error.AuthenticationError(\nopenai.error.AuthenticationError: No API key provided. You can set your API key in code using 'openai.api_key = <API-KEY>', or you can set the environment variable OPENAI_API_KEY=<API-KEY>). If your API key is stored in a file, you can point the openai module at it with 'openai.api_key_path = <PATH>'. You can generate API keys in the OpenAI web interface. See https://platform.openai.com/account/api-keys for details.\n"}, "9": {"response_time": 0.006788969039916992, "user": "krrish@berri.ai", "created_at": 1692367194.660077, "model": "gpt-3.5-turbo", "total_cost": 1.5e-05, "messages": [{"role": "user", "content": "write a 1 pg essay in liteLLM"}], "error": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/Documents/liteLLM-proxy/main.py\", line 65, in api_completion\n response = completion(**data)\n ^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 243, in wrapper\n raise e\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 227, in wrapper\n result = original_function(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/timeout.py\", line 44, in wrapper\n result = future.result(timeout=local_timeout_duration)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/concurrent/futures/_base.py\", line 456, in result\n return self.__get_result()\n ^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/concurrent/futures/_base.py\", line 401, in __get_result\n raise self._exception\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/timeout.py\", line 35, in async_func\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/main.py\", line 462, in completion\n raise exception_type(model=model, custom_llm_provider=custom_llm_provider, original_exception=e)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 751, in exception_type\n raise e\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 688, in exception_type\n raise original_exception\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/main.py\", line 108, in completion\n response = openai.ChatCompletion.create(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File 
\"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/chat_completion.py\", line 25, in create\n return super().create(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/abstract/engine_api_resource.py\", line 149, in create\n ) = cls.__prepare_create_request(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/abstract/engine_api_resource.py\", line 106, in __prepare_create_request\n requestor = api_requestor.APIRequestor(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_requestor.py\", line 138, in __init__\n self.api_key = key or util.default_api_key()\n ^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/util.py\", line 186, in default_api_key\n raise openai.error.AuthenticationError(\nopenai.error.AuthenticationError: No API key provided. You can set your API key in code using 'openai.api_key = <API-KEY>', or you can set the environment variable OPENAI_API_KEY=<API-KEY>). If your API key is stored in a file, you can point the openai module at it with 'openai.api_key_path = <PATH>'. You can generate API keys in the OpenAI web interface. See https://platform.openai.com/account/api-keys for details.\n"}, "10": {"response_time": 0.0050640106201171875, "user": "krrish@berri.ai", "created_at": 1692367267.247115, "model": "gpt-3.5-turbo", "total_cost": 1.5e-05, "messages": [{"role": "user", "content": "write a 1 pg essay in liteLLM"}], "error": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/Documents/liteLLM-proxy/main.py\", line 65, in api_completion\n response = completion(**data)\n ^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 243, in wrapper\n raise e\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 227, in wrapper\n result = original_function(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/timeout.py\", line 44, in wrapper\n result = future.result(timeout=local_timeout_duration)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/concurrent/futures/_base.py\", line 456, in result\n return self.__get_result()\n ^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/concurrent/futures/_base.py\", line 401, in __get_result\n raise self._exception\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/timeout.py\", line 35, in async_func\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/main.py\", line 462, in completion\n raise exception_type(model=model, custom_llm_provider=custom_llm_provider, original_exception=e)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 751, in exception_type\n raise e\n File 
\"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 688, in exception_type\n raise original_exception\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/main.py\", line 108, in completion\n response = openai.ChatCompletion.create(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/chat_completion.py\", line 25, in create\n return super().create(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/abstract/engine_api_resource.py\", line 149, in create\n ) = cls.__prepare_create_request(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/abstract/engine_api_resource.py\", line 106, in __prepare_create_request\n requestor = api_requestor.APIRequestor(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_requestor.py\", line 138, in __init__\n self.api_key = key or util.default_api_key()\n ^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/util.py\", line 186, in default_api_key\n raise openai.error.AuthenticationError(\nopenai.error.AuthenticationError: No API key provided. You can set your API key in code using 'openai.api_key = <API-KEY>', or you can set the environment variable OPENAI_API_KEY=<API-KEY>). If your API key is stored in a file, you can point the openai module at it with 'openai.api_key_path = <PATH>'. You can generate API keys in the OpenAI web interface. 
See https://platform.openai.com/account/api-keys for details.\n"}, "11": {"response_time": 0.005023002624511719, "user": "krrish@berri.ai", "created_at": 1692367311.4849532, "model": "gpt-3.5-turbo", "total_cost": 1.5e-05, "messages": [{"role": "user", "content": "write a 1 pg essay in liteLLM"}], "error": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/Documents/liteLLM-proxy/main.py\", line 65, in api_completion\n response = completion(**data)\n ^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 243, in wrapper\n raise e\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 227, in wrapper\n result = original_function(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/timeout.py\", line 44, in wrapper\n result = future.result(timeout=local_timeout_duration)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/concurrent/futures/_base.py\", line 456, in result\n return self.__get_result()\n ^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/concurrent/futures/_base.py\", line 401, in __get_result\n raise self._exception\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/timeout.py\", line 35, in async_func\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/main.py\", line 462, in completion\n raise exception_type(model=model, custom_llm_provider=custom_llm_provider, original_exception=e)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 751, in exception_type\n raise e\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 688, in exception_type\n raise original_exception\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/main.py\", line 108, in completion\n response = openai.ChatCompletion.create(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/chat_completion.py\", line 25, in create\n return super().create(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/abstract/engine_api_resource.py\", line 149, in create\n ) = cls.__prepare_create_request(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/abstract/engine_api_resource.py\", line 106, in __prepare_create_request\n requestor = api_requestor.APIRequestor(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_requestor.py\", line 138, in __init__\n self.api_key = key or util.default_api_key()\n ^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/util.py\", line 186, in default_api_key\n raise 
openai.error.AuthenticationError(\nopenai.error.AuthenticationError: No API key provided. You can set your API key in code using 'openai.api_key = <API-KEY>', or you can set the environment variable OPENAI_API_KEY=<API-KEY>). If your API key is stored in a file, you can point the openai module at it with 'openai.api_key_path = <PATH>'. You can generate API keys in the OpenAI web interface. See https://platform.openai.com/account/api-keys for details.\n"}, "12": {"response_time": 0.00492095947265625, "user": "krrish@berri.ai", "created_at": 1692367398.853602, "model": "gpt-3.5-turbo", "total_cost": 1.5e-05, "messages": [{"role": "user", "content": "write a 1 pg essay in liteLLM"}], "error": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/Documents/liteLLM-proxy/main.py\", line 65, in api_completion\n response = completion(**data)\n ^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 243, in wrapper\n raise e\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 227, in wrapper\n result = original_function(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/timeout.py\", line 44, in wrapper\n result = future.result(timeout=local_timeout_duration)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/concurrent/futures/_base.py\", line 456, in result\n return self.__get_result()\n ^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/concurrent/futures/_base.py\", line 401, in __get_result\n raise self._exception\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/timeout.py\", line 35, in async_func\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/main.py\", line 462, in completion\n raise exception_type(model=model, custom_llm_provider=custom_llm_provider, original_exception=e)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 751, in exception_type\n raise e\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 688, in exception_type\n raise original_exception\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/main.py\", line 108, in completion\n response = openai.ChatCompletion.create(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/chat_completion.py\", line 25, in create\n return super().create(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/abstract/engine_api_resource.py\", line 149, in create\n ) = cls.__prepare_create_request(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/abstract/engine_api_resource.py\", line 106, in __prepare_create_request\n requestor = api_requestor.APIRequestor(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File 
\"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_requestor.py\", line 138, in __init__\n self.api_key = key or util.default_api_key()\n ^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/util.py\", line 186, in default_api_key\n raise openai.error.AuthenticationError(\nopenai.error.AuthenticationError: No API key provided. You can set your API key in code using 'openai.api_key = <API-KEY>', or you can set the environment variable OPENAI_API_KEY=<API-KEY>). If your API key is stored in a file, you can point the openai module at it with 'openai.api_key_path = <PATH>'. You can generate API keys in the OpenAI web interface. See https://platform.openai.com/account/api-keys for details.\n"}, "13": {"response_time": 0.006744861602783203, "user": "krrish@berri.ai", "created_at": 1692367518.815507, "model": "gpt-3.5-turbo", "total_cost": 1.5e-05, "messages": [{"role": "user", "content": "write a 1 pg essay in liteLLM"}], "error": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/Documents/liteLLM-proxy/main.py\", line 65, in api_completion\n response = completion(**data)\n ^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 243, in wrapper\n raise e\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 227, in wrapper\n result = original_function(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/timeout.py\", line 44, in wrapper\n result = future.result(timeout=local_timeout_duration)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/concurrent/futures/_base.py\", line 456, in result\n return self.__get_result()\n ^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/concurrent/futures/_base.py\", line 401, in __get_result\n raise self._exception\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/timeout.py\", line 35, in async_func\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/main.py\", line 462, in completion\n raise exception_type(model=model, custom_llm_provider=custom_llm_provider, original_exception=e)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 751, in exception_type\n raise e\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 688, in exception_type\n raise original_exception\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/main.py\", line 108, in completion\n response = openai.ChatCompletion.create(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/chat_completion.py\", line 25, in create\n return super().create(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/abstract/engine_api_resource.py\", line 149, in create\n ) = 
cls.__prepare_create_request(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/abstract/engine_api_resource.py\", line 106, in __prepare_create_request\n requestor = api_requestor.APIRequestor(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_requestor.py\", line 138, in __init__\n self.api_key = key or util.default_api_key()\n ^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/util.py\", line 186, in default_api_key\n raise openai.error.AuthenticationError(\nopenai.error.AuthenticationError: No API key provided. You can set your API key in code using 'openai.api_key = <API-KEY>', or you can set the environment variable OPENAI_API_KEY=<API-KEY>). If your API key is stored in a file, you can point the openai module at it with 'openai.api_key_path = <PATH>'. You can generate API keys in the OpenAI web interface. See https://platform.openai.com/account/api-keys for details.\n"}, "14": {"response_time": 0.0064618587493896484, "user": "krrish@berri.ai", "created_at": 1692367601.004457, "model": "gpt-3.5-turbo", "total_cost": 1.5e-05, "messages": [{"role": "user", "content": "write a 1 pg essay in liteLLM"}], "error": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/Documents/liteLLM-proxy/main.py\", line 65, in api_completion\n response = completion(**data)\n ^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 243, in wrapper\n raise e\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 227, in wrapper\n result = original_function(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/timeout.py\", line 44, in wrapper\n result = future.result(timeout=local_timeout_duration)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/concurrent/futures/_base.py\", line 456, in result\n return self.__get_result()\n ^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/concurrent/futures/_base.py\", line 401, in __get_result\n raise self._exception\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/timeout.py\", line 35, in async_func\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/main.py\", line 462, in completion\n raise exception_type(model=model, custom_llm_provider=custom_llm_provider, original_exception=e)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 751, in exception_type\n raise e\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 688, in exception_type\n raise original_exception\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/main.py\", line 108, in completion\n response = openai.ChatCompletion.create(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File 
\"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/chat_completion.py\", line 25, in create\n return super().create(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/abstract/engine_api_resource.py\", line 149, in create\n ) = cls.__prepare_create_request(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/abstract/engine_api_resource.py\", line 106, in __prepare_create_request\n requestor = api_requestor.APIRequestor(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_requestor.py\", line 138, in __init__\n self.api_key = key or util.default_api_key()\n ^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/util.py\", line 186, in default_api_key\n raise openai.error.AuthenticationError(\nopenai.error.AuthenticationError: No API key provided. You can set your API key in code using 'openai.api_key = <API-KEY>', or you can set the environment variable OPENAI_API_KEY=<API-KEY>). If your API key is stored in a file, you can point the openai module at it with 'openai.api_key_path = <PATH>'. You can generate API keys in the OpenAI web interface. See https://platform.openai.com/account/api-keys for details.\n"}, "15": {"response_time": 0.007676124572753906, "user": "krrish@berri.ai", "created_at": 1692367952.4628842, "model": "gpt-3.5-turbo", "total_cost": 1.5e-05, "messages": [{"role": "user", "content": "write a 1 pg essay in liteLLM"}], "error": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/Documents/liteLLM-proxy/main.py\", line 65, in api_completion\n response = completion(**data)\n ^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 243, in wrapper\n raise e\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 227, in wrapper\n result = original_function(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/timeout.py\", line 44, in wrapper\n result = future.result(timeout=local_timeout_duration)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/concurrent/futures/_base.py\", line 456, in result\n return self.__get_result()\n ^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/concurrent/futures/_base.py\", line 401, in __get_result\n raise self._exception\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/timeout.py\", line 35, in async_func\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/main.py\", line 462, in completion\n raise exception_type(model=model, custom_llm_provider=custom_llm_provider, original_exception=e)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 751, in exception_type\n raise e\n File 
\"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 688, in exception_type\n raise original_exception\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/main.py\", line 108, in completion\n response = openai.ChatCompletion.create(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/chat_completion.py\", line 25, in create\n return super().create(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/abstract/engine_api_resource.py\", line 149, in create\n ) = cls.__prepare_create_request(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/abstract/engine_api_resource.py\", line 106, in __prepare_create_request\n requestor = api_requestor.APIRequestor(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_requestor.py\", line 138, in __init__\n self.api_key = key or util.default_api_key()\n ^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/util.py\", line 186, in default_api_key\n raise openai.error.AuthenticationError(\nopenai.error.AuthenticationError: No API key provided. You can set your API key in code using 'openai.api_key = <API-KEY>', or you can set the environment variable OPENAI_API_KEY=<API-KEY>). If your API key is stored in a file, you can point the openai module at it with 'openai.api_key_path = <PATH>'. You can generate API keys in the OpenAI web interface. 
See https://platform.openai.com/account/api-keys for details.\n"}, "16": {"response_time": 0.0051152706146240234, "user": "krrish@berri.ai", "created_at": 1692367998.5826821, "model": "gpt-3.5-turbo", "total_cost": 1.5e-05, "messages": [{"role": "user", "content": "write a 1 pg essay in liteLLM"}], "error": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/Documents/liteLLM-proxy/main.py\", line 65, in api_completion\n response = completion(**data)\n ^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 243, in wrapper\n raise e\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 227, in wrapper\n result = original_function(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/timeout.py\", line 44, in wrapper\n result = future.result(timeout=local_timeout_duration)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/concurrent/futures/_base.py\", line 456, in result\n return self.__get_result()\n ^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/concurrent/futures/_base.py\", line 401, in __get_result\n raise self._exception\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/timeout.py\", line 35, in async_func\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/main.py\", line 462, in completion\n raise exception_type(model=model, custom_llm_provider=custom_llm_provider, original_exception=e)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 751, in exception_type\n raise e\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 688, in exception_type\n raise original_exception\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/main.py\", line 108, in completion\n response = openai.ChatCompletion.create(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/chat_completion.py\", line 25, in create\n return super().create(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/abstract/engine_api_resource.py\", line 149, in create\n ) = cls.__prepare_create_request(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/abstract/engine_api_resource.py\", line 106, in __prepare_create_request\n requestor = api_requestor.APIRequestor(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_requestor.py\", line 138, in __init__\n self.api_key = key or util.default_api_key()\n ^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/util.py\", line 186, in default_api_key\n raise 
openai.error.AuthenticationError(\nopenai.error.AuthenticationError: No API key provided. You can set your API key in code using 'openai.api_key = <API-KEY>', or you can set the environment variable OPENAI_API_KEY=<API-KEY>). If your API key is stored in a file, you can point the openai module at it with 'openai.api_key_path = <PATH>'. You can generate API keys in the OpenAI web interface. See https://platform.openai.com/account/api-keys for details.\n"}, "17": {"request_key": "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", "response_time": 7.444961071014404, "user": "krrish@berri.ai", "created_at": 1692368006.020113, "model": "claude-instant-1", "total_cost": 0.00109828, "messages": [{"role": "user", "content": "write a 1 pg essay in liteLLM"}], "response": " Here is a one paragraph essay in a simplified style that can be understood by most readers:\n\nLearning a new language is both difficult and rewarding. When you start learning a new language, everything seems strange and unfamiliar. The words don't sound right, the grammar is different than what you're used to, and you have trouble putting sentences together. It can feel like your brain is being asked to work in a whole new way. But if you stick with it and keep practicing everyday, little by little you'll start to understand more. Simple words and phrases will become familiar. Your pronunciation and accent will start to improve. Reading comprehension and conversation ability increases over time through consistent hard work. While learning a new language takes dedication, seeing your skills grow bit by bit is very satisfying. Mastering a new language opens up new ways of thinking and allows you to connect with more people in the world. The challenges of learning are outweighed by the many benefits of becoming multilingual."}, "18": {"response_time": 0.00702977180480957, "user": "krrish@berri.ai", "created_at": 1692368100.3468301, "model": "gpt-3.5-turbo", "total_cost": 1.5e-05, "messages": [{"role": "user", "content": "write a 1 pg essay in liteLLM"}], "error": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/Documents/liteLLM-proxy/main.py\", line 65, in api_completion\n response = completion(**data)\n ^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 243, in wrapper\n raise e\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 227, in wrapper\n result = original_function(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/timeout.py\", line 44, in wrapper\n result = future.result(timeout=local_timeout_duration)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/concurrent/futures/_base.py\", line 456, in result\n return self.__get_result()\n ^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/concurrent/futures/_base.py\", line 401, in __get_result\n raise self._exception\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/timeout.py\", line 35, in async_func\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/main.py\", line 462, in completion\n raise exception_type(model=model, custom_llm_provider=custom_llm_provider, 
original_exception=e)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 751, in exception_type\n raise e\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 688, in exception_type\n raise original_exception\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/main.py\", line 108, in completion\n response = openai.ChatCompletion.create(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/chat_completion.py\", line 25, in create\n return super().create(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/abstract/engine_api_resource.py\", line 149, in create\n ) = cls.__prepare_create_request(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/abstract/engine_api_resource.py\", line 106, in __prepare_create_request\n requestor = api_requestor.APIRequestor(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_requestor.py\", line 138, in __init__\n self.api_key = key or util.default_api_key()\n ^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/util.py\", line 186, in default_api_key\n raise openai.error.AuthenticationError(\nopenai.error.AuthenticationError: No API key provided. You can set your API key in code using 'openai.api_key = <API-KEY>', or you can set the environment variable OPENAI_API_KEY=<API-KEY>). If your API key is stored in a file, you can point the openai module at it with 'openai.api_key_path = <PATH>'. You can generate API keys in the OpenAI web interface. See https://platform.openai.com/account/api-keys for details.\n"}, "19": {"request_key": "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", "response_time": 6.530564069747925, "user": "krrish@berri.ai", "created_at": 1692368106.86482, "model": "claude-instant-1", "total_cost": 0.00102665, "messages": [{"role": "user", "content": "write a 1 pg essay in liteLLM"}], "response": " Here is a one paragraph essay written in a more simplified language style:\n\nCaring for our world is important. The Earth gives us many good things like fresh air, clean water, plants and animals. But sometimes we hurt the Earth without meaning to. When we leave trash on the ground, it can hurt plants and animals. Trash also makes the Earth and water dirty. Many people are trying to help fix this problem. We can help too by putting our trash in the garbage can. We should also try to use less things that are bad for the Earth like plastic. If we all work together to take care of the Earth, it will stay healthy for a long time. Then future kids and their kids will get to enjoy the Earth too. They will get to see cool animals and breathe fresh air just like we do now. 
We need to protect the Earth so that others can enjoy it after us."}, "20": {"response_time": 0.00669097900390625, "user": "krrish@berri.ai", "created_at": 1692368168.629498, "model": "gpt-3.5-turbo", "total_cost": 1.5e-05, "messages": [{"role": "user", "content": "write a 1 pg essay in liteLLM"}], "error": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/Documents/liteLLM-proxy/main.py\", line 65, in api_completion\n response = completion(**data)\n ^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 243, in wrapper\n raise e\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 227, in wrapper\n result = original_function(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/timeout.py\", line 44, in wrapper\n result = future.result(timeout=local_timeout_duration)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/concurrent/futures/_base.py\", line 456, in result\n return self.__get_result()\n ^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/concurrent/futures/_base.py\", line 401, in __get_result\n raise self._exception\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/timeout.py\", line 35, in async_func\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/main.py\", line 462, in completion\n raise exception_type(model=model, custom_llm_provider=custom_llm_provider, original_exception=e)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 751, in exception_type\n raise e\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 688, in exception_type\n raise original_exception\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/main.py\", line 108, in completion\n response = openai.ChatCompletion.create(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/chat_completion.py\", line 25, in create\n return super().create(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/abstract/engine_api_resource.py\", line 149, in create\n ) = cls.__prepare_create_request(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/abstract/engine_api_resource.py\", line 106, in __prepare_create_request\n requestor = api_requestor.APIRequestor(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_requestor.py\", line 138, in __init__\n self.api_key = key or util.default_api_key()\n ^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/util.py\", line 186, in default_api_key\n raise 
openai.error.AuthenticationError(\nopenai.error.AuthenticationError: No API key provided. You can set your API key in code using 'openai.api_key = ', or you can set the environment variable OPENAI_API_KEY=). If your API key is stored in a file, you can point the openai module at it with 'openai.api_key_path = '. You can generate API keys in the OpenAI web interface. See https://platform.openai.com/account/api-keys for details.\n"}, "21": {"response_time": 0.005354881286621094, "user": "krrish@berri.ai", "created_at": 1692368219.4735959, "model": "gpt-3.5-turbo", "total_cost": 1.5e-05, "messages": [{"role": "user", "content": "write a 1 pg essay in liteLLM"}], "error": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/Documents/liteLLM-proxy/main.py\", line 65, in api_completion\n response = completion(**data)\n ^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 243, in wrapper\n raise e\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 227, in wrapper\n result = original_function(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/timeout.py\", line 44, in wrapper\n result = future.result(timeout=local_timeout_duration)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/concurrent/futures/_base.py\", line 456, in result\n return self.__get_result()\n ^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/concurrent/futures/_base.py\", line 401, in __get_result\n raise self._exception\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/timeout.py\", line 35, in async_func\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/main.py\", line 462, in completion\n raise exception_type(model=model, custom_llm_provider=custom_llm_provider, original_exception=e)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 751, in exception_type\n raise e\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 688, in exception_type\n raise original_exception\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/main.py\", line 108, in completion\n response = openai.ChatCompletion.create(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/chat_completion.py\", line 25, in create\n return super().create(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/abstract/engine_api_resource.py\", line 149, in create\n ) = cls.__prepare_create_request(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/abstract/engine_api_resource.py\", line 106, in __prepare_create_request\n requestor = api_requestor.APIRequestor(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File 
\"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_requestor.py\", line 138, in __init__\n self.api_key = key or util.default_api_key()\n ^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/util.py\", line 186, in default_api_key\n raise openai.error.AuthenticationError(\nopenai.error.AuthenticationError: No API key provided. You can set your API key in code using 'openai.api_key = ', or you can set the environment variable OPENAI_API_KEY=). If your API key is stored in a file, you can point the openai module at it with 'openai.api_key_path = '. You can generate API keys in the OpenAI web interface. See https://platform.openai.com/account/api-keys for details.\n"}, "22": {"request_key": "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", "response_time": 4.679172992706299, "user": "krrish@berri.ai", "created_at": 1692368224.141632, "model": "claude-instant-1", "total_cost": 0.00107624, "messages": [{"role": "user", "content": "write a 1 pg essay in liteLLM"}], "response": " Here is a one paragraph essay written in a liteLLM style:\n\nThe other day I was chillin at home watchin some Netflix and thinking about life, you know, the usual stuff. But then I started ponderin' about how we're always on our phones these days scrollin through socials and whatnot. Don't get me wrong, I love me some Instagram and TikTok, but I was noticing how we never really be in the moment no more, you feel? Like back in the day people would just be hangin out together in person without all the distractions. So I think we should all try makin more of an effort to put the phones away sometimes and really connect with those around us. It ain't good for our mental health or our relationships if we always glued to a screen instead of each other. 
We needa find a better balance, and remember that real life is usually better than what we see online."}, "23": {"response_time": 0.0068359375, "user": "krrish@berri.ai", "created_at": 1692368446.20898, "model": "gpt-3.5-turbo", "total_cost": 1.5e-05, "messages": [{"role": "user", "content": "write a 1 pg essay in liteLLM"}], "error": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/Documents/liteLLM-proxy/main.py\", line 65, in api_completion\n response = completion(**data)\n ^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 243, in wrapper\n raise e\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 227, in wrapper\n result = original_function(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/timeout.py\", line 44, in wrapper\n result = future.result(timeout=local_timeout_duration)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/concurrent/futures/_base.py\", line 456, in result\n return self.__get_result()\n ^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/concurrent/futures/_base.py\", line 401, in __get_result\n raise self._exception\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/timeout.py\", line 35, in async_func\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/main.py\", line 462, in completion\n raise exception_type(model=model, custom_llm_provider=custom_llm_provider, original_exception=e)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 751, in exception_type\n raise e\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 688, in exception_type\n raise original_exception\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/main.py\", line 108, in completion\n response = openai.ChatCompletion.create(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/chat_completion.py\", line 25, in create\n return super().create(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/abstract/engine_api_resource.py\", line 149, in create\n ) = cls.__prepare_create_request(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/abstract/engine_api_resource.py\", line 106, in __prepare_create_request\n requestor = api_requestor.APIRequestor(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_requestor.py\", line 138, in __init__\n self.api_key = key or util.default_api_key()\n ^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/util.py\", line 186, in default_api_key\n raise 
openai.error.AuthenticationError(\nopenai.error.AuthenticationError: No API key provided. You can set your API key in code using 'openai.api_key = ', or you can set the environment variable OPENAI_API_KEY=). If your API key is stored in a file, you can point the openai module at it with 'openai.api_key_path = '. You can generate API keys in the OpenAI web interface. See https://platform.openai.com/account/api-keys for details.\n"}, "24": {"response_time": 0.006057024002075195, "user": "krrish@berri.ai", "created_at": 1692368534.359404, "model": "gpt-3.5-turbo", "total_cost": 1.5e-05, "messages": [{"role": "user", "content": "write a 1 pg essay in liteLLM"}], "error": "Traceback (most recent call last):\n File \"/Users/krrishdholakia/Documents/liteLLM-proxy/main.py\", line 65, in api_completion\n response = completion(**data)\n ^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 243, in wrapper\n raise e\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 227, in wrapper\n result = original_function(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/timeout.py\", line 44, in wrapper\n result = future.result(timeout=local_timeout_duration)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/concurrent/futures/_base.py\", line 456, in result\n return self.__get_result()\n ^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/concurrent/futures/_base.py\", line 401, in __get_result\n raise self._exception\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/timeout.py\", line 35, in async_func\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/main.py\", line 462, in completion\n raise exception_type(model=model, custom_llm_provider=custom_llm_provider, original_exception=e)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 751, in exception_type\n raise e\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/utils.py\", line 688, in exception_type\n raise original_exception\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/litellm/main.py\", line 108, in completion\n response = openai.ChatCompletion.create(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/chat_completion.py\", line 25, in create\n return super().create(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/abstract/engine_api_resource.py\", line 149, in create\n ) = cls.__prepare_create_request(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/abstract/engine_api_resource.py\", line 106, in __prepare_create_request\n requestor = api_requestor.APIRequestor(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File 
\"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_requestor.py\", line 138, in __init__\n self.api_key = key or util.default_api_key()\n ^^^^^^^^^^^^^^^^^^^^^^\n File \"/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/util.py\", line 186, in default_api_key\n raise openai.error.AuthenticationError(\nopenai.error.AuthenticationError: No API key provided. You can set your API key in code using 'openai.api_key = ', or you can set the environment variable OPENAI_API_KEY=). If your API key is stored in a file, you can point the openai module at it with 'openai.api_key_path = '. You can generate API keys in the OpenAI web interface. See https://platform.openai.com/account/api-keys for details.\n"}, "25": {"request_key": "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", "response_time": 6.62284779548645, "user": "krrish@berri.ai", "created_at": 1692368540.971305, "model": "claude-instant-1", "total_cost": 0.00137929, "messages": [{"role": "user", "content": "write a 1 pg essay in liteLLM"}], "response": " Here is a one page essay in light romance prose:\n\nThe sun was beginning to set over the rolling green hills as Elaina walked along the old stone path. A light breeze stirred the towering oak trees that lined the way, sending flickering shadows dancing across the ground. It had been a long day in the village market, bartering for goods and supplies, but the solitude of the countryside path soothed her tired soul. \n\nAs she walked, her thoughts turned to Thomas. It had been nearly a week since she had seen the farmer from the neighbouring farm. His strong yet gentle manner always made her heart flutter, even from afar. Would he be finishing his chores soon and making his own way home along this very path? Elaina quickened her step slightly at the thought of a chance encounter in the gathering dusk. \n\nRounding the corner, movement in the distance caught her eye. A lone figure was walking towards her, a straw hat shielding their face from the fading light. As they drew nearer, Elaina\u2019s breath caught in her throat. It was Thomas. His worn boots scuffed the dirt path and in his hands he carried a wicker basket, no doubt filled with fresh eggs and vegetables from"}}}
--------------------------------------------------------------------------------
/admin-dashboard/package-lock.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "admin-dashboard",
3 | "version": "0.1.0",
4 | "lockfileVersion": 3,
5 | "requires": true,
6 | "packages": {
7 | "": {
8 | "name": "admin-dashboard",
9 | "version": "0.1.0",
10 | "dependencies": {
11 | "@heroicons/react": "^1.0.6",
12 | "@tremor/react": "^3.6.1",
13 | "autoprefixer": "10.4.15",
14 | "next": "13.4.17",
15 | "postcss": "8.4.28",
16 | "react": "18.2.0",
17 | "react-dom": "18.2.0",
18 | "tailwindcss": "3.3.3"
19 | }
20 | },
21 | "node_modules/@alloc/quick-lru": {
22 | "version": "5.2.0",
23 | "resolved": "https://registry.npmjs.org/@alloc/quick-lru/-/quick-lru-5.2.0.tgz",
24 | "integrity": "sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==",
25 | "engines": {
26 | "node": ">=10"
27 | },
28 | "funding": {
29 | "url": "https://github.com/sponsors/sindresorhus"
30 | }
31 | },
32 | "node_modules/@babel/runtime": {
33 | "version": "7.22.10",
34 | "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.22.10.tgz",
35 | "integrity": "sha512-21t/fkKLMZI4pqP2wlmsQAWnYW1PDyKyyUV4vCi+B25ydmdaYTKXPwCj0BzSUnZf4seIiYvSA3jcZ3gdsMFkLQ==",
36 | "dependencies": {
37 | "regenerator-runtime": "^0.14.0"
38 | },
39 | "engines": {
40 | "node": ">=6.9.0"
41 | }
42 | },
43 | "node_modules/@floating-ui/core": {
44 | "version": "1.4.1",
45 | "resolved": "https://registry.npmjs.org/@floating-ui/core/-/core-1.4.1.tgz",
46 | "integrity": "sha512-jk3WqquEJRlcyu7997NtR5PibI+y5bi+LS3hPmguVClypenMsCY3CBa3LAQnozRCtCrYWSEtAdiskpamuJRFOQ==",
47 | "dependencies": {
48 | "@floating-ui/utils": "^0.1.1"
49 | }
50 | },
51 | "node_modules/@floating-ui/dom": {
52 | "version": "1.5.1",
53 | "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.5.1.tgz",
54 | "integrity": "sha512-KwvVcPSXg6mQygvA1TjbN/gh///36kKtllIF8SUm0qpFj8+rvYrpvlYdL1JoA71SHpDqgSSdGOSoQ0Mp3uY5aw==",
55 | "dependencies": {
56 | "@floating-ui/core": "^1.4.1",
57 | "@floating-ui/utils": "^0.1.1"
58 | }
59 | },
60 | "node_modules/@floating-ui/react": {
61 | "version": "0.19.2",
62 | "resolved": "https://registry.npmjs.org/@floating-ui/react/-/react-0.19.2.tgz",
63 | "integrity": "sha512-JyNk4A0Ezirq8FlXECvRtQOX/iBe5Ize0W/pLkrZjfHW9GUV7Xnq6zm6fyZuQzaHHqEnVizmvlA96e1/CkZv+w==",
64 | "dependencies": {
65 | "@floating-ui/react-dom": "^1.3.0",
66 | "aria-hidden": "^1.1.3",
67 | "tabbable": "^6.0.1"
68 | },
69 | "peerDependencies": {
70 | "react": ">=16.8.0",
71 | "react-dom": ">=16.8.0"
72 | }
73 | },
74 | "node_modules/@floating-ui/react-dom": {
75 | "version": "1.3.0",
76 | "resolved": "https://registry.npmjs.org/@floating-ui/react-dom/-/react-dom-1.3.0.tgz",
77 | "integrity": "sha512-htwHm67Ji5E/pROEAr7f8IKFShuiCKHwUC/UY4vC3I5jiSvGFAYnSYiZO5MlGmads+QqvUkR9ANHEguGrDv72g==",
78 | "dependencies": {
79 | "@floating-ui/dom": "^1.2.1"
80 | },
81 | "peerDependencies": {
82 | "react": ">=16.8.0",
83 | "react-dom": ">=16.8.0"
84 | }
85 | },
86 | "node_modules/@floating-ui/utils": {
87 | "version": "0.1.1",
88 | "resolved": "https://registry.npmjs.org/@floating-ui/utils/-/utils-0.1.1.tgz",
89 | "integrity": "sha512-m0G6wlnhm/AX0H12IOWtK8gASEMffnX08RtKkCgTdHb9JpHKGloI7icFfLg9ZmQeavcvR0PKmzxClyuFPSjKWw=="
90 | },
91 | "node_modules/@headlessui/react": {
92 | "version": "1.7.17",
93 | "resolved": "https://registry.npmjs.org/@headlessui/react/-/react-1.7.17.tgz",
94 | "integrity": "sha512-4am+tzvkqDSSgiwrsEpGWqgGo9dz8qU5M3znCkC4PgkpY4HcCZzEDEvozltGGGHIKl9jbXbZPSH5TWn4sWJdow==",
95 | "dependencies": {
96 | "client-only": "^0.0.1"
97 | },
98 | "engines": {
99 | "node": ">=10"
100 | },
101 | "peerDependencies": {
102 | "react": "^16 || ^17 || ^18",
103 | "react-dom": "^16 || ^17 || ^18"
104 | }
105 | },
106 | "node_modules/@headlessui/tailwindcss": {
107 | "version": "0.1.3",
108 | "resolved": "https://registry.npmjs.org/@headlessui/tailwindcss/-/tailwindcss-0.1.3.tgz",
109 | "integrity": "sha512-3aMdDyYZx9A15euRehpppSyQnb2gIw2s/Uccn2ELIoLQ9oDy0+9oRygNWNjXCD5Dt+w1pxo7C+XoiYvGcqA4Kg==",
110 | "engines": {
111 | "node": ">=10"
112 | },
113 | "peerDependencies": {
114 | "tailwindcss": "^3.0"
115 | }
116 | },
117 | "node_modules/@heroicons/react": {
118 | "version": "1.0.6",
119 | "resolved": "https://registry.npmjs.org/@heroicons/react/-/react-1.0.6.tgz",
120 | "integrity": "sha512-JJCXydOFWMDpCP4q13iEplA503MQO3xLoZiKum+955ZCtHINWnx26CUxVxxFQu/uLb4LW3ge15ZpzIkXKkJ8oQ==",
121 | "peerDependencies": {
122 | "react": ">= 16"
123 | }
124 | },
125 | "node_modules/@jridgewell/gen-mapping": {
126 | "version": "0.3.3",
127 | "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.3.tgz",
128 | "integrity": "sha512-HLhSWOLRi875zjjMG/r+Nv0oCW8umGb0BgEhyX3dDX3egwZtB8PqLnjz3yedt8R5StBrzcg4aBpnh8UA9D1BoQ==",
129 | "dependencies": {
130 | "@jridgewell/set-array": "^1.0.1",
131 | "@jridgewell/sourcemap-codec": "^1.4.10",
132 | "@jridgewell/trace-mapping": "^0.3.9"
133 | },
134 | "engines": {
135 | "node": ">=6.0.0"
136 | }
137 | },
138 | "node_modules/@jridgewell/resolve-uri": {
139 | "version": "3.1.1",
140 | "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.1.tgz",
141 | "integrity": "sha512-dSYZh7HhCDtCKm4QakX0xFpsRDqjjtZf/kjI/v3T3Nwt5r8/qz/M19F9ySyOqU94SXBmeG9ttTul+YnR4LOxFA==",
142 | "engines": {
143 | "node": ">=6.0.0"
144 | }
145 | },
146 | "node_modules/@jridgewell/set-array": {
147 | "version": "1.1.2",
148 | "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.1.2.tgz",
149 | "integrity": "sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==",
150 | "engines": {
151 | "node": ">=6.0.0"
152 | }
153 | },
154 | "node_modules/@jridgewell/sourcemap-codec": {
155 | "version": "1.4.15",
156 | "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz",
157 | "integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg=="
158 | },
159 | "node_modules/@jridgewell/trace-mapping": {
160 | "version": "0.3.19",
161 | "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.19.tgz",
162 | "integrity": "sha512-kf37QtfW+Hwx/buWGMPcR60iF9ziHa6r/CZJIHbmcm4+0qrXiVdxegAH0F6yddEVQ7zdkjcGCgCzUu+BcbhQxw==",
163 | "dependencies": {
164 | "@jridgewell/resolve-uri": "^3.1.0",
165 | "@jridgewell/sourcemap-codec": "^1.4.14"
166 | }
167 | },
168 | "node_modules/@next/env": {
169 | "version": "13.4.17",
170 | "resolved": "https://registry.npmjs.org/@next/env/-/env-13.4.17.tgz",
171 | "integrity": "sha512-rSGmt25Wxk0vGzZxDWBLE8jVW/C/JN20P3IhHc2tKVajEGy/oxStD9PbqcbCz6yOub82jYAWLqnoMITnssB+3g=="
172 | },
173 | "node_modules/@next/swc-darwin-arm64": {
174 | "version": "13.4.17",
175 | "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-13.4.17.tgz",
176 | "integrity": "sha512-dzsHchMmBwa6w6Gf5sp5+WmVt9/H4KWdSHy45aFE/UNmgr9V9eKfTW29k9Np9glLCEzrwnU1MztbAqDrnV9gEA==",
177 | "cpu": [
178 | "arm64"
179 | ],
180 | "optional": true,
181 | "os": [
182 | "darwin"
183 | ],
184 | "engines": {
185 | "node": ">= 10"
186 | }
187 | },
188 | "node_modules/@next/swc-darwin-x64": {
189 | "version": "13.4.17",
190 | "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-13.4.17.tgz",
191 | "integrity": "sha512-iH6UpMj3S40cZkJwYD+uBsAYACNu9TUCae47q2kqx1WzO3JuN/m5Zg22Cpwum/HLRJUa7ysJva/FG2noXbI0yw==",
192 | "cpu": [
193 | "x64"
194 | ],
195 | "optional": true,
196 | "os": [
197 | "darwin"
198 | ],
199 | "engines": {
200 | "node": ">= 10"
201 | }
202 | },
203 | "node_modules/@next/swc-linux-arm64-gnu": {
204 | "version": "13.4.17",
205 | "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-13.4.17.tgz",
206 | "integrity": "sha512-yj3YKGkSg52GL+4XhdfidibYJoq/5pYkQAc8Z4Q1e1nJ7CTOKn4KobTDLXqC5QVJncQRxC2u6vGaMLBe2UUa5Q==",
207 | "cpu": [
208 | "arm64"
209 | ],
210 | "optional": true,
211 | "os": [
212 | "linux"
213 | ],
214 | "engines": {
215 | "node": ">= 10"
216 | }
217 | },
218 | "node_modules/@next/swc-linux-arm64-musl": {
219 | "version": "13.4.17",
220 | "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-13.4.17.tgz",
221 | "integrity": "sha512-w8+8ShThIgIgIkLk22NY+ZMF/yf5Dl6+tqOaNUzXy6b0gQSwtpVb0t4eSTx2VUqRxLl36dv9cqomGbthvuPiGA==",
222 | "cpu": [
223 | "arm64"
224 | ],
225 | "optional": true,
226 | "os": [
227 | "linux"
228 | ],
229 | "engines": {
230 | "node": ">= 10"
231 | }
232 | },
233 | "node_modules/@next/swc-linux-x64-gnu": {
234 | "version": "13.4.17",
235 | "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-13.4.17.tgz",
236 | "integrity": "sha512-IQlJNdxvfqgHxJU6ITERf9qaA0m6mRo/gD0al/5CcXvs6cDihR/UzI09Bc+3vQSJV3ACAzrZjsF7dtdzVutvog==",
237 | "cpu": [
238 | "x64"
239 | ],
240 | "optional": true,
241 | "os": [
242 | "linux"
243 | ],
244 | "engines": {
245 | "node": ">= 10"
246 | }
247 | },
248 | "node_modules/@next/swc-linux-x64-musl": {
249 | "version": "13.4.17",
250 | "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-13.4.17.tgz",
251 | "integrity": "sha512-retAiJGtOS216pqAcNOwBUOqgqDH7kYzzj4jLrfVcb/sCQJ+JawMwayc3LEbpvMDZx8CHLECcs6bB45mMxkZEw==",
252 | "cpu": [
253 | "x64"
254 | ],
255 | "optional": true,
256 | "os": [
257 | "linux"
258 | ],
259 | "engines": {
260 | "node": ">= 10"
261 | }
262 | },
263 | "node_modules/@next/swc-win32-arm64-msvc": {
264 | "version": "13.4.17",
265 | "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-13.4.17.tgz",
266 | "integrity": "sha512-PtaemQL9rwoRtS6kgjXxfRQLUbzBmtMxaXZTBnKnb+EjrDFkC+YI82kktL97LMrHRGQsMJcBQQtNQDJCBJmu2Q==",
267 | "cpu": [
268 | "arm64"
269 | ],
270 | "optional": true,
271 | "os": [
272 | "win32"
273 | ],
274 | "engines": {
275 | "node": ">= 10"
276 | }
277 | },
278 | "node_modules/@next/swc-win32-ia32-msvc": {
279 | "version": "13.4.17",
280 | "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-13.4.17.tgz",
281 | "integrity": "sha512-5jJVxit2B3g/zRWJJ6/YeMHBch7PL10O5qR5BZyuFCoO/bg6MPtz5+U+FvbVCSgCKePU19lRGNsyX+BAu/V+vw==",
282 | "cpu": [
283 | "ia32"
284 | ],
285 | "optional": true,
286 | "os": [
287 | "win32"
288 | ],
289 | "engines": {
290 | "node": ">= 10"
291 | }
292 | },
293 | "node_modules/@next/swc-win32-x64-msvc": {
294 | "version": "13.4.17",
295 | "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-13.4.17.tgz",
296 | "integrity": "sha512-3QOf2LfziycZW1iVgiA63xVVUMkawurZJ/jwgBqziUNw4r8XHLenNTgbn5XcdHqKuZKUuLSi/6v1/4myGWM0GA==",
297 | "cpu": [
298 | "x64"
299 | ],
300 | "optional": true,
301 | "os": [
302 | "win32"
303 | ],
304 | "engines": {
305 | "node": ">= 10"
306 | }
307 | },
308 | "node_modules/@nodelib/fs.scandir": {
309 | "version": "2.1.5",
310 | "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz",
311 | "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==",
312 | "dependencies": {
313 | "@nodelib/fs.stat": "2.0.5",
314 | "run-parallel": "^1.1.9"
315 | },
316 | "engines": {
317 | "node": ">= 8"
318 | }
319 | },
320 | "node_modules/@nodelib/fs.stat": {
321 | "version": "2.0.5",
322 | "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz",
323 | "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==",
324 | "engines": {
325 | "node": ">= 8"
326 | }
327 | },
328 | "node_modules/@nodelib/fs.walk": {
329 | "version": "1.2.8",
330 | "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz",
331 | "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==",
332 | "dependencies": {
333 | "@nodelib/fs.scandir": "2.1.5",
334 | "fastq": "^1.6.0"
335 | },
336 | "engines": {
337 | "node": ">= 8"
338 | }
339 | },
340 | "node_modules/@swc/helpers": {
341 | "version": "0.5.1",
342 | "resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.1.tgz",
343 | "integrity": "sha512-sJ902EfIzn1Fa+qYmjdQqh8tPsoxyBz+8yBKC2HKUxyezKJFwPGOn7pv4WY6QuQW//ySQi5lJjA/ZT9sNWWNTg==",
344 | "dependencies": {
345 | "tslib": "^2.4.0"
346 | }
347 | },
348 | "node_modules/@tremor/react": {
349 | "version": "3.6.1",
350 | "resolved": "https://registry.npmjs.org/@tremor/react/-/react-3.6.1.tgz",
351 | "integrity": "sha512-AHLKwW62kTGjZVEMO8INRNFV/wGM3yZE0eyczMWQ2JrKQ4SNlt6Cn9Q8pnSP/Spub0YBOH55EgU9K0sLjgAVNQ==",
352 | "dependencies": {
353 | "@floating-ui/react": "^0.19.1",
354 | "@headlessui/react": "^1.7.14",
355 | "@headlessui/tailwindcss": "^0.1.3",
356 | "date-fns": "^2.28.0",
357 | "react-day-picker": "^8.7.1",
358 | "react-transition-group": "^4.4.5",
359 | "recharts": "^2.7.1",
360 | "tailwind-merge": "^1.9.1"
361 | },
362 | "peerDependencies": {
363 | "react": "^18.0.0",
364 | "react-dom": ">=16.6.0"
365 | }
366 | },
367 | "node_modules/@types/d3-array": {
368 | "version": "3.0.5",
369 | "resolved": "https://registry.npmjs.org/@types/d3-array/-/d3-array-3.0.5.tgz",
370 | "integrity": "sha512-Qk7fpJ6qFp+26VeQ47WY0mkwXaiq8+76RJcncDEfMc2ocRzXLO67bLFRNI4OX1aGBoPzsM5Y2T+/m1pldOgD+A=="
371 | },
372 | "node_modules/@types/d3-color": {
373 | "version": "3.1.0",
374 | "resolved": "https://registry.npmjs.org/@types/d3-color/-/d3-color-3.1.0.tgz",
375 | "integrity": "sha512-HKuicPHJuvPgCD+np6Se9MQvS6OCbJmOjGvylzMJRlDwUXjKTTXs6Pwgk79O09Vj/ho3u1ofXnhFOaEWWPrlwA=="
376 | },
377 | "node_modules/@types/d3-ease": {
378 | "version": "3.0.0",
379 | "resolved": "https://registry.npmjs.org/@types/d3-ease/-/d3-ease-3.0.0.tgz",
380 | "integrity": "sha512-aMo4eaAOijJjA6uU+GIeW018dvy9+oH5Y2VPPzjjfxevvGQ/oRDs+tfYC9b50Q4BygRR8yE2QCLsrT0WtAVseA=="
381 | },
382 | "node_modules/@types/d3-interpolate": {
383 | "version": "3.0.1",
384 | "resolved": "https://registry.npmjs.org/@types/d3-interpolate/-/d3-interpolate-3.0.1.tgz",
385 | "integrity": "sha512-jx5leotSeac3jr0RePOH1KdR9rISG91QIE4Q2PYTu4OymLTZfA3SrnURSLzKH48HmXVUru50b8nje4E79oQSQw==",
386 | "dependencies": {
387 | "@types/d3-color": "*"
388 | }
389 | },
390 | "node_modules/@types/d3-path": {
391 | "version": "3.0.0",
392 | "resolved": "https://registry.npmjs.org/@types/d3-path/-/d3-path-3.0.0.tgz",
393 | "integrity": "sha512-0g/A+mZXgFkQxN3HniRDbXMN79K3CdTpLsevj+PXiTcb2hVyvkZUBg37StmgCQkaD84cUJ4uaDAWq7UJOQy2Tg=="
394 | },
395 | "node_modules/@types/d3-scale": {
396 | "version": "4.0.3",
397 | "resolved": "https://registry.npmjs.org/@types/d3-scale/-/d3-scale-4.0.3.tgz",
398 | "integrity": "sha512-PATBiMCpvHJSMtZAMEhc2WyL+hnzarKzI6wAHYjhsonjWJYGq5BXTzQjv4l8m2jO183/4wZ90rKvSeT7o72xNQ==",
399 | "dependencies": {
400 | "@types/d3-time": "*"
401 | }
402 | },
403 | "node_modules/@types/d3-shape": {
404 | "version": "3.1.1",
405 | "resolved": "https://registry.npmjs.org/@types/d3-shape/-/d3-shape-3.1.1.tgz",
406 | "integrity": "sha512-6Uh86YFF7LGg4PQkuO2oG6EMBRLuW9cbavUW46zkIO5kuS2PfTqo2o9SkgtQzguBHbLgNnU90UNsITpsX1My+A==",
407 | "dependencies": {
408 | "@types/d3-path": "*"
409 | }
410 | },
411 | "node_modules/@types/d3-time": {
412 | "version": "3.0.0",
413 | "resolved": "https://registry.npmjs.org/@types/d3-time/-/d3-time-3.0.0.tgz",
414 | "integrity": "sha512-sZLCdHvBUcNby1cB6Fd3ZBrABbjz3v1Vm90nysCQ6Vt7vd6e/h9Lt7SiJUoEX0l4Dzc7P5llKyhqSi1ycSf1Hg=="
415 | },
416 | "node_modules/@types/d3-timer": {
417 | "version": "3.0.0",
418 | "resolved": "https://registry.npmjs.org/@types/d3-timer/-/d3-timer-3.0.0.tgz",
419 | "integrity": "sha512-HNB/9GHqu7Fo8AQiugyJbv6ZxYz58wef0esl4Mv828w1ZKpAshw/uFWVDUcIB9KKFeFKoxS3cHY07FFgtTRZ1g=="
420 | },
421 | "node_modules/any-promise": {
422 | "version": "1.3.0",
423 | "resolved": "https://registry.npmjs.org/any-promise/-/any-promise-1.3.0.tgz",
424 | "integrity": "sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A=="
425 | },
426 | "node_modules/anymatch": {
427 | "version": "3.1.3",
428 | "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz",
429 | "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==",
430 | "dependencies": {
431 | "normalize-path": "^3.0.0",
432 | "picomatch": "^2.0.4"
433 | },
434 | "engines": {
435 | "node": ">= 8"
436 | }
437 | },
438 | "node_modules/arg": {
439 | "version": "5.0.2",
440 | "resolved": "https://registry.npmjs.org/arg/-/arg-5.0.2.tgz",
441 | "integrity": "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg=="
442 | },
443 | "node_modules/aria-hidden": {
444 | "version": "1.2.3",
445 | "resolved": "https://registry.npmjs.org/aria-hidden/-/aria-hidden-1.2.3.tgz",
446 | "integrity": "sha512-xcLxITLe2HYa1cnYnwCjkOO1PqUHQpozB8x9AR0OgWN2woOBi5kSDVxKfd0b7sb1hw5qFeJhXm9H1nu3xSfLeQ==",
447 | "dependencies": {
448 | "tslib": "^2.0.0"
449 | },
450 | "engines": {
451 | "node": ">=10"
452 | }
453 | },
454 | "node_modules/autoprefixer": {
455 | "version": "10.4.15",
456 | "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.15.tgz",
457 | "integrity": "sha512-KCuPB8ZCIqFdA4HwKXsvz7j6gvSDNhDP7WnUjBleRkKjPdvCmHFuQ77ocavI8FT6NdvlBnE2UFr2H4Mycn8Vew==",
458 | "funding": [
459 | {
460 | "type": "opencollective",
461 | "url": "https://opencollective.com/postcss/"
462 | },
463 | {
464 | "type": "tidelift",
465 | "url": "https://tidelift.com/funding/github/npm/autoprefixer"
466 | },
467 | {
468 | "type": "github",
469 | "url": "https://github.com/sponsors/ai"
470 | }
471 | ],
472 | "dependencies": {
473 | "browserslist": "^4.21.10",
474 | "caniuse-lite": "^1.0.30001520",
475 | "fraction.js": "^4.2.0",
476 | "normalize-range": "^0.1.2",
477 | "picocolors": "^1.0.0",
478 | "postcss-value-parser": "^4.2.0"
479 | },
480 | "bin": {
481 | "autoprefixer": "bin/autoprefixer"
482 | },
483 | "engines": {
484 | "node": "^10 || ^12 || >=14"
485 | },
486 | "peerDependencies": {
487 | "postcss": "^8.1.0"
488 | }
489 | },
490 | "node_modules/balanced-match": {
491 | "version": "1.0.2",
492 | "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz",
493 | "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw=="
494 | },
495 | "node_modules/binary-extensions": {
496 | "version": "2.2.0",
497 | "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz",
498 | "integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==",
499 | "engines": {
500 | "node": ">=8"
501 | }
502 | },
503 | "node_modules/brace-expansion": {
504 | "version": "1.1.11",
505 | "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz",
506 | "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==",
507 | "dependencies": {
508 | "balanced-match": "^1.0.0",
509 | "concat-map": "0.0.1"
510 | }
511 | },
512 | "node_modules/braces": {
513 | "version": "3.0.2",
514 | "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz",
515 | "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==",
516 | "dependencies": {
517 | "fill-range": "^7.0.1"
518 | },
519 | "engines": {
520 | "node": ">=8"
521 | }
522 | },
523 | "node_modules/browserslist": {
524 | "version": "4.21.10",
525 | "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.21.10.tgz",
526 | "integrity": "sha512-bipEBdZfVH5/pwrvqc+Ub0kUPVfGUhlKxbvfD+z1BDnPEO/X98ruXGA1WP5ASpAFKan7Qr6j736IacbZQuAlKQ==",
527 | "funding": [
528 | {
529 | "type": "opencollective",
530 | "url": "https://opencollective.com/browserslist"
531 | },
532 | {
533 | "type": "tidelift",
534 | "url": "https://tidelift.com/funding/github/npm/browserslist"
535 | },
536 | {
537 | "type": "github",
538 | "url": "https://github.com/sponsors/ai"
539 | }
540 | ],
541 | "dependencies": {
542 | "caniuse-lite": "^1.0.30001517",
543 | "electron-to-chromium": "^1.4.477",
544 | "node-releases": "^2.0.13",
545 | "update-browserslist-db": "^1.0.11"
546 | },
547 | "bin": {
548 | "browserslist": "cli.js"
549 | },
550 | "engines": {
551 | "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7"
552 | }
553 | },
554 | "node_modules/busboy": {
555 | "version": "1.6.0",
556 | "resolved": "https://registry.npmjs.org/busboy/-/busboy-1.6.0.tgz",
557 | "integrity": "sha512-8SFQbg/0hQ9xy3UNTB0YEnsNBbWfhf7RtnzpL7TkBiTBRfrQ9Fxcnz7VJsleJpyp6rVLvXiuORqjlHi5q+PYuA==",
558 | "dependencies": {
559 | "streamsearch": "^1.1.0"
560 | },
561 | "engines": {
562 | "node": ">=10.16.0"
563 | }
564 | },
565 | "node_modules/camelcase-css": {
566 | "version": "2.0.1",
567 | "resolved": "https://registry.npmjs.org/camelcase-css/-/camelcase-css-2.0.1.tgz",
568 | "integrity": "sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==",
569 | "engines": {
570 | "node": ">= 6"
571 | }
572 | },
573 | "node_modules/caniuse-lite": {
574 | "version": "1.0.30001521",
575 | "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001521.tgz",
576 | "integrity": "sha512-fnx1grfpEOvDGH+V17eccmNjucGUnCbP6KL+l5KqBIerp26WK/+RQ7CIDE37KGJjaPyqWXXlFUyKiWmvdNNKmQ==",
577 | "funding": [
578 | {
579 | "type": "opencollective",
580 | "url": "https://opencollective.com/browserslist"
581 | },
582 | {
583 | "type": "tidelift",
584 | "url": "https://tidelift.com/funding/github/npm/caniuse-lite"
585 | },
586 | {
587 | "type": "github",
588 | "url": "https://github.com/sponsors/ai"
589 | }
590 | ]
591 | },
592 | "node_modules/chokidar": {
593 | "version": "3.5.3",
594 | "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.3.tgz",
595 | "integrity": "sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==",
596 | "funding": [
597 | {
598 | "type": "individual",
599 | "url": "https://paulmillr.com/funding/"
600 | }
601 | ],
602 | "dependencies": {
603 | "anymatch": "~3.1.2",
604 | "braces": "~3.0.2",
605 | "glob-parent": "~5.1.2",
606 | "is-binary-path": "~2.1.0",
607 | "is-glob": "~4.0.1",
608 | "normalize-path": "~3.0.0",
609 | "readdirp": "~3.6.0"
610 | },
611 | "engines": {
612 | "node": ">= 8.10.0"
613 | },
614 | "optionalDependencies": {
615 | "fsevents": "~2.3.2"
616 | }
617 | },
618 | "node_modules/chokidar/node_modules/glob-parent": {
619 | "version": "5.1.2",
620 | "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz",
621 | "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==",
622 | "dependencies": {
623 | "is-glob": "^4.0.1"
624 | },
625 | "engines": {
626 | "node": ">= 6"
627 | }
628 | },
629 | "node_modules/classnames": {
630 | "version": "2.3.2",
631 | "resolved": "https://registry.npmjs.org/classnames/-/classnames-2.3.2.tgz",
632 | "integrity": "sha512-CSbhY4cFEJRe6/GQzIk5qXZ4Jeg5pcsP7b5peFSDpffpe1cqjASH/n9UTjBwOp6XpMSTwQ8Za2K5V02ueA7Tmw=="
633 | },
634 | "node_modules/client-only": {
635 | "version": "0.0.1",
636 | "resolved": "https://registry.npmjs.org/client-only/-/client-only-0.0.1.tgz",
637 | "integrity": "sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA=="
638 | },
639 | "node_modules/commander": {
640 | "version": "4.1.1",
641 | "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz",
642 | "integrity": "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==",
643 | "engines": {
644 | "node": ">= 6"
645 | }
646 | },
647 | "node_modules/concat-map": {
648 | "version": "0.0.1",
649 | "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz",
650 | "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg=="
651 | },
652 | "node_modules/css-unit-converter": {
653 | "version": "1.1.2",
654 | "resolved": "https://registry.npmjs.org/css-unit-converter/-/css-unit-converter-1.1.2.tgz",
655 | "integrity": "sha512-IiJwMC8rdZE0+xiEZHeru6YoONC4rfPMqGm2W85jMIbkFvv5nFTwJVFHam2eFrN6txmoUYFAFXiv8ICVeTO0MA=="
656 | },
657 | "node_modules/cssesc": {
658 | "version": "3.0.0",
659 | "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz",
660 | "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==",
661 | "bin": {
662 | "cssesc": "bin/cssesc"
663 | },
664 | "engines": {
665 | "node": ">=4"
666 | }
667 | },
668 | "node_modules/csstype": {
669 | "version": "3.1.2",
670 | "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.2.tgz",
671 | "integrity": "sha512-I7K1Uu0MBPzaFKg4nI5Q7Vs2t+3gWWW648spaF+Rg7pI9ds18Ugn+lvg4SHczUdKlHI5LWBXyqfS8+DufyBsgQ=="
672 | },
673 | "node_modules/d3-array": {
674 | "version": "3.2.4",
675 | "resolved": "https://registry.npmjs.org/d3-array/-/d3-array-3.2.4.tgz",
676 | "integrity": "sha512-tdQAmyA18i4J7wprpYq8ClcxZy3SC31QMeByyCFyRt7BVHdREQZ5lpzoe5mFEYZUWe+oq8HBvk9JjpibyEV4Jg==",
677 | "dependencies": {
678 | "internmap": "1 - 2"
679 | },
680 | "engines": {
681 | "node": ">=12"
682 | }
683 | },
684 | "node_modules/d3-color": {
685 | "version": "3.1.0",
686 | "resolved": "https://registry.npmjs.org/d3-color/-/d3-color-3.1.0.tgz",
687 | "integrity": "sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA==",
688 | "engines": {
689 | "node": ">=12"
690 | }
691 | },
692 | "node_modules/d3-ease": {
693 | "version": "3.0.1",
694 | "resolved": "https://registry.npmjs.org/d3-ease/-/d3-ease-3.0.1.tgz",
695 | "integrity": "sha512-wR/XK3D3XcLIZwpbvQwQ5fK+8Ykds1ip7A2Txe0yxncXSdq1L9skcG7blcedkOX+ZcgxGAmLX1FrRGbADwzi0w==",
696 | "engines": {
697 | "node": ">=12"
698 | }
699 | },
700 | "node_modules/d3-format": {
701 | "version": "3.1.0",
702 | "resolved": "https://registry.npmjs.org/d3-format/-/d3-format-3.1.0.tgz",
703 | "integrity": "sha512-YyUI6AEuY/Wpt8KWLgZHsIU86atmikuoOmCfommt0LYHiQSPjvX2AcFc38PX0CBpr2RCyZhjex+NS/LPOv6YqA==",
704 | "engines": {
705 | "node": ">=12"
706 | }
707 | },
708 | "node_modules/d3-interpolate": {
709 | "version": "3.0.1",
710 | "resolved": "https://registry.npmjs.org/d3-interpolate/-/d3-interpolate-3.0.1.tgz",
711 | "integrity": "sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g==",
712 | "dependencies": {
713 | "d3-color": "1 - 3"
714 | },
715 | "engines": {
716 | "node": ">=12"
717 | }
718 | },
719 | "node_modules/d3-path": {
720 | "version": "3.1.0",
721 | "resolved": "https://registry.npmjs.org/d3-path/-/d3-path-3.1.0.tgz",
722 | "integrity": "sha512-p3KP5HCf/bvjBSSKuXid6Zqijx7wIfNW+J/maPs+iwR35at5JCbLUT0LzF1cnjbCHWhqzQTIN2Jpe8pRebIEFQ==",
723 | "engines": {
724 | "node": ">=12"
725 | }
726 | },
727 | "node_modules/d3-scale": {
728 | "version": "4.0.2",
729 | "resolved": "https://registry.npmjs.org/d3-scale/-/d3-scale-4.0.2.tgz",
730 | "integrity": "sha512-GZW464g1SH7ag3Y7hXjf8RoUuAFIqklOAq3MRl4OaWabTFJY9PN/E1YklhXLh+OQ3fM9yS2nOkCoS+WLZ6kvxQ==",
731 | "dependencies": {
732 | "d3-array": "2.10.0 - 3",
733 | "d3-format": "1 - 3",
734 | "d3-interpolate": "1.2.0 - 3",
735 | "d3-time": "2.1.1 - 3",
736 | "d3-time-format": "2 - 4"
737 | },
738 | "engines": {
739 | "node": ">=12"
740 | }
741 | },
742 | "node_modules/d3-shape": {
743 | "version": "3.2.0",
744 | "resolved": "https://registry.npmjs.org/d3-shape/-/d3-shape-3.2.0.tgz",
745 | "integrity": "sha512-SaLBuwGm3MOViRq2ABk3eLoxwZELpH6zhl3FbAoJ7Vm1gofKx6El1Ib5z23NUEhF9AsGl7y+dzLe5Cw2AArGTA==",
746 | "dependencies": {
747 | "d3-path": "^3.1.0"
748 | },
749 | "engines": {
750 | "node": ">=12"
751 | }
752 | },
753 | "node_modules/d3-time": {
754 | "version": "3.1.0",
755 | "resolved": "https://registry.npmjs.org/d3-time/-/d3-time-3.1.0.tgz",
756 | "integrity": "sha512-VqKjzBLejbSMT4IgbmVgDjpkYrNWUYJnbCGo874u7MMKIWsILRX+OpX/gTk8MqjpT1A/c6HY2dCA77ZN0lkQ2Q==",
757 | "dependencies": {
758 | "d3-array": "2 - 3"
759 | },
760 | "engines": {
761 | "node": ">=12"
762 | }
763 | },
764 | "node_modules/d3-time-format": {
765 | "version": "4.1.0",
766 | "resolved": "https://registry.npmjs.org/d3-time-format/-/d3-time-format-4.1.0.tgz",
767 | "integrity": "sha512-dJxPBlzC7NugB2PDLwo9Q8JiTR3M3e4/XANkreKSUxF8vvXKqm1Yfq4Q5dl8budlunRVlUUaDUgFt7eA8D6NLg==",
768 | "dependencies": {
769 | "d3-time": "1 - 3"
770 | },
771 | "engines": {
772 | "node": ">=12"
773 | }
774 | },
775 | "node_modules/d3-timer": {
776 | "version": "3.0.1",
777 | "resolved": "https://registry.npmjs.org/d3-timer/-/d3-timer-3.0.1.tgz",
778 | "integrity": "sha512-ndfJ/JxxMd3nw31uyKoY2naivF+r29V+Lc0svZxe1JvvIRmi8hUsrMvdOwgS1o6uBHmiz91geQ0ylPP0aj1VUA==",
779 | "engines": {
780 | "node": ">=12"
781 | }
782 | },
783 | "node_modules/date-fns": {
784 | "version": "2.30.0",
785 | "resolved": "https://registry.npmjs.org/date-fns/-/date-fns-2.30.0.tgz",
786 | "integrity": "sha512-fnULvOpxnC5/Vg3NCiWelDsLiUc9bRwAPs/+LfTLNvetFCtCTN+yQz15C/fs4AwX1R9K5GLtLfn8QW+dWisaAw==",
787 | "dependencies": {
788 | "@babel/runtime": "^7.21.0"
789 | },
790 | "engines": {
791 | "node": ">=0.11"
792 | },
793 | "funding": {
794 | "type": "opencollective",
795 | "url": "https://opencollective.com/date-fns"
796 | }
797 | },
798 | "node_modules/decimal.js-light": {
799 | "version": "2.5.1",
800 | "resolved": "https://registry.npmjs.org/decimal.js-light/-/decimal.js-light-2.5.1.tgz",
801 | "integrity": "sha512-qIMFpTMZmny+MMIitAB6D7iVPEorVw6YQRWkvarTkT4tBeSLLiHzcwj6q0MmYSFCiVpiqPJTJEYIrpcPzVEIvg=="
802 | },
803 | "node_modules/didyoumean": {
804 | "version": "1.2.2",
805 | "resolved": "https://registry.npmjs.org/didyoumean/-/didyoumean-1.2.2.tgz",
806 | "integrity": "sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw=="
807 | },
808 | "node_modules/dlv": {
809 | "version": "1.1.3",
810 | "resolved": "https://registry.npmjs.org/dlv/-/dlv-1.1.3.tgz",
811 | "integrity": "sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA=="
812 | },
813 | "node_modules/dom-helpers": {
814 | "version": "5.2.1",
815 | "resolved": "https://registry.npmjs.org/dom-helpers/-/dom-helpers-5.2.1.tgz",
816 | "integrity": "sha512-nRCa7CK3VTrM2NmGkIy4cbK7IZlgBE/PYMn55rrXefr5xXDP0LdtfPnblFDoVdcAfslJ7or6iqAUnx0CCGIWQA==",
817 | "dependencies": {
818 | "@babel/runtime": "^7.8.7",
819 | "csstype": "^3.0.2"
820 | }
821 | },
822 | "node_modules/electron-to-chromium": {
823 | "version": "1.4.495",
824 | "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.495.tgz",
825 | "integrity": "sha512-mwknuemBZnoOCths4GtpU/SDuVMp3uQHKa2UNJT9/aVD6WVRjGpXOxRGX7lm6ILIenTdGXPSTCTDaWos5tEU8Q=="
826 | },
827 | "node_modules/escalade": {
828 | "version": "3.1.1",
829 | "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz",
830 | "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==",
831 | "engines": {
832 | "node": ">=6"
833 | }
834 | },
835 | "node_modules/eventemitter3": {
836 | "version": "4.0.7",
837 | "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz",
838 | "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw=="
839 | },
840 | "node_modules/fast-equals": {
841 | "version": "5.0.1",
842 | "resolved": "https://registry.npmjs.org/fast-equals/-/fast-equals-5.0.1.tgz",
843 | "integrity": "sha512-WF1Wi8PwwSY7/6Kx0vKXtw8RwuSGoM1bvDaJbu7MxDlR1vovZjIAKrnzyrThgAjm6JDTu0fVgWXDlMGspodfoQ==",
844 | "engines": {
845 | "node": ">=6.0.0"
846 | }
847 | },
848 | "node_modules/fast-glob": {
849 | "version": "3.3.1",
850 | "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.1.tgz",
851 | "integrity": "sha512-kNFPyjhh5cKjrUltxs+wFx+ZkbRaxxmZ+X0ZU31SOsxCEtP9VPgtq2teZw1DebupL5GmDaNQ6yKMMVcM41iqDg==",
852 | "dependencies": {
853 | "@nodelib/fs.stat": "^2.0.2",
854 | "@nodelib/fs.walk": "^1.2.3",
855 | "glob-parent": "^5.1.2",
856 | "merge2": "^1.3.0",
857 | "micromatch": "^4.0.4"
858 | },
859 | "engines": {
860 | "node": ">=8.6.0"
861 | }
862 | },
863 | "node_modules/fast-glob/node_modules/glob-parent": {
864 | "version": "5.1.2",
865 | "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz",
866 | "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==",
867 | "dependencies": {
868 | "is-glob": "^4.0.1"
869 | },
870 | "engines": {
871 | "node": ">= 6"
872 | }
873 | },
874 | "node_modules/fastq": {
875 | "version": "1.15.0",
876 | "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.15.0.tgz",
877 | "integrity": "sha512-wBrocU2LCXXa+lWBt8RoIRD89Fi8OdABODa/kEnyeyjS5aZO5/GNvI5sEINADqP/h8M29UHTHUb53sUu5Ihqdw==",
878 | "dependencies": {
879 | "reusify": "^1.0.4"
880 | }
881 | },
882 | "node_modules/fill-range": {
883 | "version": "7.0.1",
884 | "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz",
885 | "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==",
886 | "dependencies": {
887 | "to-regex-range": "^5.0.1"
888 | },
889 | "engines": {
890 | "node": ">=8"
891 | }
892 | },
893 | "node_modules/fraction.js": {
894 | "version": "4.2.0",
895 | "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-4.2.0.tgz",
896 | "integrity": "sha512-MhLuK+2gUcnZe8ZHlaaINnQLl0xRIGRfcGk2yl8xoQAfHrSsL3rYu6FCmBdkdbhc9EPlwyGHewaRsvwRMJtAlA==",
897 | "engines": {
898 | "node": "*"
899 | },
900 | "funding": {
901 | "type": "patreon",
902 | "url": "https://www.patreon.com/infusion"
903 | }
904 | },
905 | "node_modules/fs.realpath": {
906 | "version": "1.0.0",
907 | "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz",
908 | "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw=="
909 | },
910 | "node_modules/fsevents": {
911 | "version": "2.3.2",
912 | "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz",
913 | "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==",
914 | "hasInstallScript": true,
915 | "optional": true,
916 | "os": [
917 | "darwin"
918 | ],
919 | "engines": {
920 | "node": "^8.16.0 || ^10.6.0 || >=11.0.0"
921 | }
922 | },
923 | "node_modules/function-bind": {
924 | "version": "1.1.1",
925 | "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz",
926 | "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A=="
927 | },
928 | "node_modules/glob": {
929 | "version": "7.1.6",
930 | "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz",
931 | "integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==",
932 | "dependencies": {
933 | "fs.realpath": "^1.0.0",
934 | "inflight": "^1.0.4",
935 | "inherits": "2",
936 | "minimatch": "^3.0.4",
937 | "once": "^1.3.0",
938 | "path-is-absolute": "^1.0.0"
939 | },
940 | "engines": {
941 | "node": "*"
942 | },
943 | "funding": {
944 | "url": "https://github.com/sponsors/isaacs"
945 | }
946 | },
947 | "node_modules/glob-parent": {
948 | "version": "6.0.2",
949 | "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz",
950 | "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==",
951 | "dependencies": {
952 | "is-glob": "^4.0.3"
953 | },
954 | "engines": {
955 | "node": ">=10.13.0"
956 | }
957 | },
958 | "node_modules/glob-to-regexp": {
959 | "version": "0.4.1",
960 | "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz",
961 | "integrity": "sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw=="
962 | },
963 | "node_modules/graceful-fs": {
964 | "version": "4.2.11",
965 | "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz",
966 | "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ=="
967 | },
968 | "node_modules/has": {
969 | "version": "1.0.3",
970 | "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz",
971 | "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==",
972 | "dependencies": {
973 | "function-bind": "^1.1.1"
974 | },
975 | "engines": {
976 | "node": ">= 0.4.0"
977 | }
978 | },
979 | "node_modules/inflight": {
980 | "version": "1.0.6",
981 | "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz",
982 | "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==",
983 | "dependencies": {
984 | "once": "^1.3.0",
985 | "wrappy": "1"
986 | }
987 | },
988 | "node_modules/inherits": {
989 | "version": "2.0.4",
990 | "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
991 | "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="
992 | },
993 | "node_modules/internmap": {
994 | "version": "2.0.3",
995 | "resolved": "https://registry.npmjs.org/internmap/-/internmap-2.0.3.tgz",
996 | "integrity": "sha512-5Hh7Y1wQbvY5ooGgPbDaL5iYLAPzMTUrjMulskHLH6wnv/A+1q5rgEaiuqEjB+oxGXIVZs1FF+R/KPN3ZSQYYg==",
997 | "engines": {
998 | "node": ">=12"
999 | }
1000 | },
1001 | "node_modules/is-binary-path": {
1002 | "version": "2.1.0",
1003 | "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz",
1004 | "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==",
1005 | "dependencies": {
1006 | "binary-extensions": "^2.0.0"
1007 | },
1008 | "engines": {
1009 | "node": ">=8"
1010 | }
1011 | },
1012 | "node_modules/is-core-module": {
1013 | "version": "2.13.0",
1014 | "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.13.0.tgz",
1015 | "integrity": "sha512-Z7dk6Qo8pOCp3l4tsX2C5ZVas4V+UxwQodwZhLopL91TX8UyyHEXafPcyoeeWuLrwzHcr3igO78wNLwHJHsMCQ==",
1016 | "dependencies": {
1017 | "has": "^1.0.3"
1018 | },
1019 | "funding": {
1020 | "url": "https://github.com/sponsors/ljharb"
1021 | }
1022 | },
1023 | "node_modules/is-extglob": {
1024 | "version": "2.1.1",
1025 | "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz",
1026 | "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==",
1027 | "engines": {
1028 | "node": ">=0.10.0"
1029 | }
1030 | },
1031 | "node_modules/is-glob": {
1032 | "version": "4.0.3",
1033 | "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz",
1034 | "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==",
1035 | "dependencies": {
1036 | "is-extglob": "^2.1.1"
1037 | },
1038 | "engines": {
1039 | "node": ">=0.10.0"
1040 | }
1041 | },
1042 | "node_modules/is-number": {
1043 | "version": "7.0.0",
1044 | "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz",
1045 | "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==",
1046 | "engines": {
1047 | "node": ">=0.12.0"
1048 | }
1049 | },
1050 | "node_modules/jiti": {
1051 | "version": "1.19.1",
1052 | "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.19.1.tgz",
1053 | "integrity": "sha512-oVhqoRDaBXf7sjkll95LHVS6Myyyb1zaunVwk4Z0+WPSW4gjS0pl01zYKHScTuyEhQsFxV5L4DR5r+YqSyqyyg==",
1054 | "bin": {
1055 | "jiti": "bin/jiti.js"
1056 | }
1057 | },
1058 | "node_modules/js-tokens": {
1059 | "version": "4.0.0",
1060 | "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
1061 | "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ=="
1062 | },
1063 | "node_modules/lilconfig": {
1064 | "version": "2.1.0",
1065 | "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-2.1.0.tgz",
1066 | "integrity": "sha512-utWOt/GHzuUxnLKxB6dk81RoOeoNeHgbrXiuGk4yyF5qlRz+iIVWu56E2fqGHFrXz0QNUhLB/8nKqvRH66JKGQ==",
1067 | "engines": {
1068 | "node": ">=10"
1069 | }
1070 | },
1071 | "node_modules/lines-and-columns": {
1072 | "version": "1.2.4",
1073 | "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz",
1074 | "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg=="
1075 | },
1076 | "node_modules/lodash": {
1077 | "version": "4.17.21",
1078 | "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz",
1079 | "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg=="
1080 | },
1081 | "node_modules/loose-envify": {
1082 | "version": "1.4.0",
1083 | "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz",
1084 | "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==",
1085 | "dependencies": {
1086 | "js-tokens": "^3.0.0 || ^4.0.0"
1087 | },
1088 | "bin": {
1089 | "loose-envify": "cli.js"
1090 | }
1091 | },
1092 | "node_modules/merge2": {
1093 | "version": "1.4.1",
1094 | "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz",
1095 | "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==",
1096 | "engines": {
1097 | "node": ">= 8"
1098 | }
1099 | },
1100 | "node_modules/micromatch": {
1101 | "version": "4.0.5",
1102 | "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz",
1103 | "integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==",
1104 | "dependencies": {
1105 | "braces": "^3.0.2",
1106 | "picomatch": "^2.3.1"
1107 | },
1108 | "engines": {
1109 | "node": ">=8.6"
1110 | }
1111 | },
1112 | "node_modules/minimatch": {
1113 | "version": "3.1.2",
1114 | "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz",
1115 | "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==",
1116 | "dependencies": {
1117 | "brace-expansion": "^1.1.7"
1118 | },
1119 | "engines": {
1120 | "node": "*"
1121 | }
1122 | },
1123 | "node_modules/mz": {
1124 | "version": "2.7.0",
1125 | "resolved": "https://registry.npmjs.org/mz/-/mz-2.7.0.tgz",
1126 | "integrity": "sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==",
1127 | "dependencies": {
1128 | "any-promise": "^1.0.0",
1129 | "object-assign": "^4.0.1",
1130 | "thenify-all": "^1.0.0"
1131 | }
1132 | },
1133 | "node_modules/nanoid": {
1134 | "version": "3.3.6",
1135 | "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.6.tgz",
1136 | "integrity": "sha512-BGcqMMJuToF7i1rt+2PWSNVnWIkGCU78jBG3RxO/bZlnZPK2Cmi2QaffxGO/2RvWi9sL+FAiRiXMgsyxQ1DIDA==",
1137 | "funding": [
1138 | {
1139 | "type": "github",
1140 | "url": "https://github.com/sponsors/ai"
1141 | }
1142 | ],
1143 | "bin": {
1144 | "nanoid": "bin/nanoid.cjs"
1145 | },
1146 | "engines": {
1147 | "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1"
1148 | }
1149 | },
1150 | "node_modules/next": {
1151 | "version": "13.4.17",
1152 | "resolved": "https://registry.npmjs.org/next/-/next-13.4.17.tgz",
1153 | "integrity": "sha512-f0L+lbQA+GFkHu9wpupiURLFIEEPSVQhUuR+5lQNI+aFzbCbCGl7h0Vurs1jA4wtP7T7fEO0iSWmt37+88wIZA==",
1154 | "dependencies": {
1155 | "@next/env": "13.4.17",
1156 | "@swc/helpers": "0.5.1",
1157 | "busboy": "1.6.0",
1158 | "caniuse-lite": "^1.0.30001406",
1159 | "postcss": "8.4.14",
1160 | "styled-jsx": "5.1.1",
1161 | "watchpack": "2.4.0",
1162 | "zod": "3.21.4"
1163 | },
1164 | "bin": {
1165 | "next": "dist/bin/next"
1166 | },
1167 | "engines": {
1168 | "node": ">=16.8.0"
1169 | },
1170 | "optionalDependencies": {
1171 | "@next/swc-darwin-arm64": "13.4.17",
1172 | "@next/swc-darwin-x64": "13.4.17",
1173 | "@next/swc-linux-arm64-gnu": "13.4.17",
1174 | "@next/swc-linux-arm64-musl": "13.4.17",
1175 | "@next/swc-linux-x64-gnu": "13.4.17",
1176 | "@next/swc-linux-x64-musl": "13.4.17",
1177 | "@next/swc-win32-arm64-msvc": "13.4.17",
1178 | "@next/swc-win32-ia32-msvc": "13.4.17",
1179 | "@next/swc-win32-x64-msvc": "13.4.17"
1180 | },
1181 | "peerDependencies": {
1182 | "@opentelemetry/api": "^1.1.0",
1183 | "react": "^18.2.0",
1184 | "react-dom": "^18.2.0",
1185 | "sass": "^1.3.0"
1186 | },
1187 | "peerDependenciesMeta": {
1188 | "@opentelemetry/api": {
1189 | "optional": true
1190 | },
1191 | "sass": {
1192 | "optional": true
1193 | }
1194 | }
1195 | },
1196 | "node_modules/next/node_modules/postcss": {
1197 | "version": "8.4.14",
1198 | "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.14.tgz",
1199 | "integrity": "sha512-E398TUmfAYFPBSdzgeieK2Y1+1cpdxJx8yXbK/m57nRhKSmk1GB2tO4lbLBtlkfPQTDKfe4Xqv1ASWPpayPEig==",
1200 | "funding": [
1201 | {
1202 | "type": "opencollective",
1203 | "url": "https://opencollective.com/postcss/"
1204 | },
1205 | {
1206 | "type": "tidelift",
1207 | "url": "https://tidelift.com/funding/github/npm/postcss"
1208 | }
1209 | ],
1210 | "dependencies": {
1211 | "nanoid": "^3.3.4",
1212 | "picocolors": "^1.0.0",
1213 | "source-map-js": "^1.0.2"
1214 | },
1215 | "engines": {
1216 | "node": "^10 || ^12 || >=14"
1217 | }
1218 | },
1219 | "node_modules/node-releases": {
1220 | "version": "2.0.13",
1221 | "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.13.tgz",
1222 | "integrity": "sha512-uYr7J37ae/ORWdZeQ1xxMJe3NtdmqMC/JZK+geofDrkLUApKRHPd18/TxtBOJ4A0/+uUIliorNrfYV6s1b02eQ=="
1223 | },
1224 | "node_modules/normalize-path": {
1225 | "version": "3.0.0",
1226 | "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz",
1227 | "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==",
1228 | "engines": {
1229 | "node": ">=0.10.0"
1230 | }
1231 | },
1232 | "node_modules/normalize-range": {
1233 | "version": "0.1.2",
1234 | "resolved": "https://registry.npmjs.org/normalize-range/-/normalize-range-0.1.2.tgz",
1235 | "integrity": "sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA==",
1236 | "engines": {
1237 | "node": ">=0.10.0"
1238 | }
1239 | },
1240 | "node_modules/object-assign": {
1241 | "version": "4.1.1",
1242 | "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz",
1243 | "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==",
1244 | "engines": {
1245 | "node": ">=0.10.0"
1246 | }
1247 | },
1248 | "node_modules/object-hash": {
1249 | "version": "3.0.0",
1250 | "resolved": "https://registry.npmjs.org/object-hash/-/object-hash-3.0.0.tgz",
1251 | "integrity": "sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw==",
1252 | "engines": {
1253 | "node": ">= 6"
1254 | }
1255 | },
1256 | "node_modules/once": {
1257 | "version": "1.4.0",
1258 | "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
1259 | "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==",
1260 | "dependencies": {
1261 | "wrappy": "1"
1262 | }
1263 | },
1264 | "node_modules/path-is-absolute": {
1265 | "version": "1.0.1",
1266 | "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz",
1267 | "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==",
1268 | "engines": {
1269 | "node": ">=0.10.0"
1270 | }
1271 | },
1272 | "node_modules/path-parse": {
1273 | "version": "1.0.7",
1274 | "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz",
1275 | "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw=="
1276 | },
1277 | "node_modules/picocolors": {
1278 | "version": "1.0.0",
1279 | "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz",
1280 | "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ=="
1281 | },
1282 | "node_modules/picomatch": {
1283 | "version": "2.3.1",
1284 | "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz",
1285 | "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==",
1286 | "engines": {
1287 | "node": ">=8.6"
1288 | },
1289 | "funding": {
1290 | "url": "https://github.com/sponsors/jonschlinkert"
1291 | }
1292 | },
1293 | "node_modules/pify": {
1294 | "version": "2.3.0",
1295 | "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz",
1296 | "integrity": "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==",
1297 | "engines": {
1298 | "node": ">=0.10.0"
1299 | }
1300 | },
1301 | "node_modules/pirates": {
1302 | "version": "4.0.6",
1303 | "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.6.tgz",
1304 | "integrity": "sha512-saLsH7WeYYPiD25LDuLRRY/i+6HaPYr6G1OUlN39otzkSTxKnubR9RTxS3/Kk50s1g2JTgFwWQDQyplC5/SHZg==",
1305 | "engines": {
1306 | "node": ">= 6"
1307 | }
1308 | },
1309 | "node_modules/postcss": {
1310 | "version": "8.4.28",
1311 | "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.28.tgz",
1312 | "integrity": "sha512-Z7V5j0cq8oEKyejIKfpD8b4eBy9cwW2JWPk0+fB1HOAMsfHbnAXLLS+PfVWlzMSLQaWttKDt607I0XHmpE67Vw==",
1313 | "funding": [
1314 | {
1315 | "type": "opencollective",
1316 | "url": "https://opencollective.com/postcss/"
1317 | },
1318 | {
1319 | "type": "tidelift",
1320 | "url": "https://tidelift.com/funding/github/npm/postcss"
1321 | },
1322 | {
1323 | "type": "github",
1324 | "url": "https://github.com/sponsors/ai"
1325 | }
1326 | ],
1327 | "dependencies": {
1328 | "nanoid": "^3.3.6",
1329 | "picocolors": "^1.0.0",
1330 | "source-map-js": "^1.0.2"
1331 | },
1332 | "engines": {
1333 | "node": "^10 || ^12 || >=14"
1334 | }
1335 | },
1336 | "node_modules/postcss-import": {
1337 | "version": "15.1.0",
1338 | "resolved": "https://registry.npmjs.org/postcss-import/-/postcss-import-15.1.0.tgz",
1339 | "integrity": "sha512-hpr+J05B2FVYUAXHeK1YyI267J/dDDhMU6B6civm8hSY1jYJnBXxzKDKDswzJmtLHryrjhnDjqqp/49t8FALew==",
1340 | "dependencies": {
1341 | "postcss-value-parser": "^4.0.0",
1342 | "read-cache": "^1.0.0",
1343 | "resolve": "^1.1.7"
1344 | },
1345 | "engines": {
1346 | "node": ">=14.0.0"
1347 | },
1348 | "peerDependencies": {
1349 | "postcss": "^8.0.0"
1350 | }
1351 | },
1352 | "node_modules/postcss-js": {
1353 | "version": "4.0.1",
1354 | "resolved": "https://registry.npmjs.org/postcss-js/-/postcss-js-4.0.1.tgz",
1355 | "integrity": "sha512-dDLF8pEO191hJMtlHFPRa8xsizHaM82MLfNkUHdUtVEV3tgTp5oj+8qbEqYM57SLfc74KSbw//4SeJma2LRVIw==",
1356 | "dependencies": {
1357 | "camelcase-css": "^2.0.1"
1358 | },
1359 | "engines": {
1360 | "node": "^12 || ^14 || >= 16"
1361 | },
1362 | "funding": {
1363 | "type": "opencollective",
1364 | "url": "https://opencollective.com/postcss/"
1365 | },
1366 | "peerDependencies": {
1367 | "postcss": "^8.4.21"
1368 | }
1369 | },
1370 | "node_modules/postcss-load-config": {
1371 | "version": "4.0.1",
1372 | "resolved": "https://registry.npmjs.org/postcss-load-config/-/postcss-load-config-4.0.1.tgz",
1373 | "integrity": "sha512-vEJIc8RdiBRu3oRAI0ymerOn+7rPuMvRXslTvZUKZonDHFIczxztIyJ1urxM1x9JXEikvpWWTUUqal5j/8QgvA==",
1374 | "dependencies": {
1375 | "lilconfig": "^2.0.5",
1376 | "yaml": "^2.1.1"
1377 | },
1378 | "engines": {
1379 | "node": ">= 14"
1380 | },
1381 | "funding": {
1382 | "type": "opencollective",
1383 | "url": "https://opencollective.com/postcss/"
1384 | },
1385 | "peerDependencies": {
1386 | "postcss": ">=8.0.9",
1387 | "ts-node": ">=9.0.0"
1388 | },
1389 | "peerDependenciesMeta": {
1390 | "postcss": {
1391 | "optional": true
1392 | },
1393 | "ts-node": {
1394 | "optional": true
1395 | }
1396 | }
1397 | },
1398 | "node_modules/postcss-nested": {
1399 | "version": "6.0.1",
1400 | "resolved": "https://registry.npmjs.org/postcss-nested/-/postcss-nested-6.0.1.tgz",
1401 | "integrity": "sha512-mEp4xPMi5bSWiMbsgoPfcP74lsWLHkQbZc3sY+jWYd65CUwXrUaTp0fmNpa01ZcETKlIgUdFN/MpS2xZtqL9dQ==",
1402 | "dependencies": {
1403 | "postcss-selector-parser": "^6.0.11"
1404 | },
1405 | "engines": {
1406 | "node": ">=12.0"
1407 | },
1408 | "funding": {
1409 | "type": "opencollective",
1410 | "url": "https://opencollective.com/postcss/"
1411 | },
1412 | "peerDependencies": {
1413 | "postcss": "^8.2.14"
1414 | }
1415 | },
1416 | "node_modules/postcss-selector-parser": {
1417 | "version": "6.0.13",
1418 | "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.13.tgz",
1419 | "integrity": "sha512-EaV1Gl4mUEV4ddhDnv/xtj7sxwrwxdetHdWUGnT4VJQf+4d05v6lHYZr8N573k5Z0BViss7BDhfWtKS3+sfAqQ==",
1420 | "dependencies": {
1421 | "cssesc": "^3.0.0",
1422 | "util-deprecate": "^1.0.2"
1423 | },
1424 | "engines": {
1425 | "node": ">=4"
1426 | }
1427 | },
1428 | "node_modules/postcss-value-parser": {
1429 | "version": "4.2.0",
1430 | "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz",
1431 | "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ=="
1432 | },
1433 | "node_modules/prop-types": {
1434 | "version": "15.8.1",
1435 | "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz",
1436 | "integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==",
1437 | "dependencies": {
1438 | "loose-envify": "^1.4.0",
1439 | "object-assign": "^4.1.1",
1440 | "react-is": "^16.13.1"
1441 | }
1442 | },
1443 | "node_modules/queue-microtask": {
1444 | "version": "1.2.3",
1445 | "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz",
1446 | "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==",
1447 | "funding": [
1448 | {
1449 | "type": "github",
1450 | "url": "https://github.com/sponsors/feross"
1451 | },
1452 | {
1453 | "type": "patreon",
1454 | "url": "https://www.patreon.com/feross"
1455 | },
1456 | {
1457 | "type": "consulting",
1458 | "url": "https://feross.org/support"
1459 | }
1460 | ]
1461 | },
1462 | "node_modules/react": {
1463 | "version": "18.2.0",
1464 | "resolved": "https://registry.npmjs.org/react/-/react-18.2.0.tgz",
1465 | "integrity": "sha512-/3IjMdb2L9QbBdWiW5e3P2/npwMBaU9mHCSCUzNln0ZCYbcfTsGbTJrU/kGemdH2IWmB2ioZ+zkxtmq6g09fGQ==",
1466 | "dependencies": {
1467 | "loose-envify": "^1.1.0"
1468 | },
1469 | "engines": {
1470 | "node": ">=0.10.0"
1471 | }
1472 | },
1473 | "node_modules/react-day-picker": {
1474 | "version": "8.8.0",
1475 | "resolved": "https://registry.npmjs.org/react-day-picker/-/react-day-picker-8.8.0.tgz",
1476 | "integrity": "sha512-QIC3uOuyGGbtypbd5QEggsCSqVaPNu8kzUWquZ7JjW9fuWB9yv7WyixKmnaFelTLXFdq7h7zU6n/aBleBqe/dA==",
1477 | "funding": {
1478 | "type": "individual",
1479 | "url": "https://github.com/sponsors/gpbl"
1480 | },
1481 | "peerDependencies": {
1482 | "date-fns": "^2.28.0",
1483 | "react": "^16.8.0 || ^17.0.0 || ^18.0.0"
1484 | }
1485 | },
1486 | "node_modules/react-dom": {
1487 | "version": "18.2.0",
1488 | "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.2.0.tgz",
1489 | "integrity": "sha512-6IMTriUmvsjHUjNtEDudZfuDQUoWXVxKHhlEGSk81n4YFS+r/Kl99wXiwlVXtPBtJenozv2P+hxDsw9eA7Xo6g==",
1490 | "dependencies": {
1491 | "loose-envify": "^1.1.0",
1492 | "scheduler": "^0.23.0"
1493 | },
1494 | "peerDependencies": {
1495 | "react": "^18.2.0"
1496 | }
1497 | },
1498 | "node_modules/react-is": {
1499 | "version": "16.13.1",
1500 | "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz",
1501 | "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ=="
1502 | },
1503 | "node_modules/react-lifecycles-compat": {
1504 | "version": "3.0.4",
1505 | "resolved": "https://registry.npmjs.org/react-lifecycles-compat/-/react-lifecycles-compat-3.0.4.tgz",
1506 | "integrity": "sha512-fBASbA6LnOU9dOU2eW7aQ8xmYBSXUIWr+UmF9b1efZBazGNO+rcXT/icdKnYm2pTwcRylVUYwW7H1PHfLekVzA=="
1507 | },
1508 | "node_modules/react-resize-detector": {
1509 | "version": "8.1.0",
1510 | "resolved": "https://registry.npmjs.org/react-resize-detector/-/react-resize-detector-8.1.0.tgz",
1511 | "integrity": "sha512-S7szxlaIuiy5UqLhLL1KY3aoyGHbZzsTpYal9eYMwCyKqoqoVLCmIgAgNyIM1FhnP2KyBygASJxdhejrzjMb+w==",
1512 | "dependencies": {
1513 | "lodash": "^4.17.21"
1514 | },
1515 | "peerDependencies": {
1516 | "react": "^16.0.0 || ^17.0.0 || ^18.0.0",
1517 | "react-dom": "^16.0.0 || ^17.0.0 || ^18.0.0"
1518 | }
1519 | },
1520 | "node_modules/react-smooth": {
1521 | "version": "2.0.3",
1522 | "resolved": "https://registry.npmjs.org/react-smooth/-/react-smooth-2.0.3.tgz",
1523 | "integrity": "sha512-yl4y3XiMorss7ayF5QnBiSprig0+qFHui8uh7Hgg46QX5O+aRMRKlfGGNGLHno35JkQSvSYY8eCWkBfHfrSHfg==",
1524 | "dependencies": {
1525 | "fast-equals": "^5.0.0",
1526 | "react-transition-group": "2.9.0"
1527 | },
1528 | "peerDependencies": {
1529 | "prop-types": "^15.6.0",
1530 | "react": "^15.0.0 || ^16.0.0 || ^17.0.0 || ^18.0.0",
1531 | "react-dom": "^15.0.0 || ^16.0.0 || ^17.0.0 || ^18.0.0"
1532 | }
1533 | },
1534 | "node_modules/react-smooth/node_modules/dom-helpers": {
1535 | "version": "3.4.0",
1536 | "resolved": "https://registry.npmjs.org/dom-helpers/-/dom-helpers-3.4.0.tgz",
1537 | "integrity": "sha512-LnuPJ+dwqKDIyotW1VzmOZ5TONUN7CwkCR5hrgawTUbkBGYdeoNLZo6nNfGkCrjtE1nXXaj7iMMpDa8/d9WoIA==",
1538 | "dependencies": {
1539 | "@babel/runtime": "^7.1.2"
1540 | }
1541 | },
1542 | "node_modules/react-smooth/node_modules/react-transition-group": {
1543 | "version": "2.9.0",
1544 | "resolved": "https://registry.npmjs.org/react-transition-group/-/react-transition-group-2.9.0.tgz",
1545 | "integrity": "sha512-+HzNTCHpeQyl4MJ/bdE0u6XRMe9+XG/+aL4mCxVN4DnPBQ0/5bfHWPDuOZUzYdMj94daZaZdCCc1Dzt9R/xSSg==",
1546 | "dependencies": {
1547 | "dom-helpers": "^3.4.0",
1548 | "loose-envify": "^1.4.0",
1549 | "prop-types": "^15.6.2",
1550 | "react-lifecycles-compat": "^3.0.4"
1551 | },
1552 | "peerDependencies": {
1553 | "react": ">=15.0.0",
1554 | "react-dom": ">=15.0.0"
1555 | }
1556 | },
1557 | "node_modules/react-transition-group": {
1558 | "version": "4.4.5",
1559 | "resolved": "https://registry.npmjs.org/react-transition-group/-/react-transition-group-4.4.5.tgz",
1560 | "integrity": "sha512-pZcd1MCJoiKiBR2NRxeCRg13uCXbydPnmB4EOeRrY7480qNWO8IIgQG6zlDkm6uRMsURXPuKq0GWtiM59a5Q6g==",
1561 | "dependencies": {
1562 | "@babel/runtime": "^7.5.5",
1563 | "dom-helpers": "^5.0.1",
1564 | "loose-envify": "^1.4.0",
1565 | "prop-types": "^15.6.2"
1566 | },
1567 | "peerDependencies": {
1568 | "react": ">=16.6.0",
1569 | "react-dom": ">=16.6.0"
1570 | }
1571 | },
1572 | "node_modules/read-cache": {
1573 | "version": "1.0.0",
1574 | "resolved": "https://registry.npmjs.org/read-cache/-/read-cache-1.0.0.tgz",
1575 | "integrity": "sha512-Owdv/Ft7IjOgm/i0xvNDZ1LrRANRfew4b2prF3OWMQLxLfu3bS8FVhCsrSCMK4lR56Y9ya+AThoTpDCTxCmpRA==",
1576 | "dependencies": {
1577 | "pify": "^2.3.0"
1578 | }
1579 | },
1580 | "node_modules/readdirp": {
1581 | "version": "3.6.0",
1582 | "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz",
1583 | "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==",
1584 | "dependencies": {
1585 | "picomatch": "^2.2.1"
1586 | },
1587 | "engines": {
1588 | "node": ">=8.10.0"
1589 | }
1590 | },
1591 | "node_modules/recharts": {
1592 | "version": "2.7.3",
1593 | "resolved": "https://registry.npmjs.org/recharts/-/recharts-2.7.3.tgz",
1594 | "integrity": "sha512-cKoO9jUZRQavn06H6Ih2EcG82zUNdQH3OEGWVCmluSDyp3d7fIpDAsbMTd8hE8+T+MD8P76iicv/J4pJspDP7A==",
1595 | "dependencies": {
1596 | "classnames": "^2.2.5",
1597 | "eventemitter3": "^4.0.1",
1598 | "lodash": "^4.17.19",
1599 | "react-is": "^16.10.2",
1600 | "react-resize-detector": "^8.0.4",
1601 | "react-smooth": "^2.0.2",
1602 | "recharts-scale": "^0.4.4",
1603 | "reduce-css-calc": "^2.1.8",
1604 | "victory-vendor": "^36.6.8"
1605 | },
1606 | "engines": {
1607 | "node": ">=12"
1608 | },
1609 | "peerDependencies": {
1610 | "prop-types": "^15.6.0",
1611 | "react": "^16.0.0 || ^17.0.0 || ^18.0.0",
1612 | "react-dom": "^16.0.0 || ^17.0.0 || ^18.0.0"
1613 | }
1614 | },
1615 | "node_modules/recharts-scale": {
1616 | "version": "0.4.5",
1617 | "resolved": "https://registry.npmjs.org/recharts-scale/-/recharts-scale-0.4.5.tgz",
1618 | "integrity": "sha512-kivNFO+0OcUNu7jQquLXAxz1FIwZj8nrj+YkOKc5694NbjCvcT6aSZiIzNzd2Kul4o4rTto8QVR9lMNtxD4G1w==",
1619 | "dependencies": {
1620 | "decimal.js-light": "^2.4.1"
1621 | }
1622 | },
1623 | "node_modules/reduce-css-calc": {
1624 | "version": "2.1.8",
1625 | "resolved": "https://registry.npmjs.org/reduce-css-calc/-/reduce-css-calc-2.1.8.tgz",
1626 | "integrity": "sha512-8liAVezDmUcH+tdzoEGrhfbGcP7nOV4NkGE3a74+qqvE7nt9i4sKLGBuZNOnpI4WiGksiNPklZxva80061QiPg==",
1627 | "dependencies": {
1628 | "css-unit-converter": "^1.1.1",
1629 | "postcss-value-parser": "^3.3.0"
1630 | }
1631 | },
1632 | "node_modules/reduce-css-calc/node_modules/postcss-value-parser": {
1633 | "version": "3.3.1",
1634 | "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz",
1635 | "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ=="
1636 | },
1637 | "node_modules/regenerator-runtime": {
1638 | "version": "0.14.0",
1639 | "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.14.0.tgz",
1640 | "integrity": "sha512-srw17NI0TUWHuGa5CFGGmhfNIeja30WMBfbslPNhf6JrqQlLN5gcrvig1oqPxiVaXb0oW0XRKtH6Nngs5lKCIA=="
1641 | },
1642 | "node_modules/resolve": {
1643 | "version": "1.22.4",
1644 | "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.4.tgz",
1645 | "integrity": "sha512-PXNdCiPqDqeUou+w1C2eTQbNfxKSuMxqTCuvlmmMsk1NWHL5fRrhY6Pl0qEYYc6+QqGClco1Qj8XnjPego4wfg==",
1646 | "dependencies": {
1647 | "is-core-module": "^2.13.0",
1648 | "path-parse": "^1.0.7",
1649 | "supports-preserve-symlinks-flag": "^1.0.0"
1650 | },
1651 | "bin": {
1652 | "resolve": "bin/resolve"
1653 | },
1654 | "funding": {
1655 | "url": "https://github.com/sponsors/ljharb"
1656 | }
1657 | },
1658 | "node_modules/reusify": {
1659 | "version": "1.0.4",
1660 | "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz",
1661 | "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==",
1662 | "engines": {
1663 | "iojs": ">=1.0.0",
1664 | "node": ">=0.10.0"
1665 | }
1666 | },
1667 | "node_modules/run-parallel": {
1668 | "version": "1.2.0",
1669 | "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz",
1670 | "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==",
1671 | "funding": [
1672 | {
1673 | "type": "github",
1674 | "url": "https://github.com/sponsors/feross"
1675 | },
1676 | {
1677 | "type": "patreon",
1678 | "url": "https://www.patreon.com/feross"
1679 | },
1680 | {
1681 | "type": "consulting",
1682 | "url": "https://feross.org/support"
1683 | }
1684 | ],
1685 | "dependencies": {
1686 | "queue-microtask": "^1.2.2"
1687 | }
1688 | },
1689 | "node_modules/scheduler": {
1690 | "version": "0.23.0",
1691 | "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.0.tgz",
1692 | "integrity": "sha512-CtuThmgHNg7zIZWAXi3AsyIzA3n4xx7aNyjwC2VJldO2LMVDhFK+63xGqq6CsJH4rTAt6/M+N4GhZiDYPx9eUw==",
1693 | "dependencies": {
1694 | "loose-envify": "^1.1.0"
1695 | }
1696 | },
1697 | "node_modules/source-map-js": {
1698 | "version": "1.0.2",
1699 | "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.0.2.tgz",
1700 | "integrity": "sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==",
1701 | "engines": {
1702 | "node": ">=0.10.0"
1703 | }
1704 | },
1705 | "node_modules/streamsearch": {
1706 | "version": "1.1.0",
1707 | "resolved": "https://registry.npmjs.org/streamsearch/-/streamsearch-1.1.0.tgz",
1708 | "integrity": "sha512-Mcc5wHehp9aXz1ax6bZUyY5afg9u2rv5cqQI3mRrYkGC8rW2hM02jWuwjtL++LS5qinSyhj2QfLyNsuc+VsExg==",
1709 | "engines": {
1710 | "node": ">=10.0.0"
1711 | }
1712 | },
1713 | "node_modules/styled-jsx": {
1714 | "version": "5.1.1",
1715 | "resolved": "https://registry.npmjs.org/styled-jsx/-/styled-jsx-5.1.1.tgz",
1716 | "integrity": "sha512-pW7uC1l4mBZ8ugbiZrcIsiIvVx1UmTfw7UkC3Um2tmfUq9Bhk8IiyEIPl6F8agHgjzku6j0xQEZbfA5uSgSaCw==",
1717 | "dependencies": {
1718 | "client-only": "0.0.1"
1719 | },
1720 | "engines": {
1721 | "node": ">= 12.0.0"
1722 | },
1723 | "peerDependencies": {
1724 | "react": ">= 16.8.0 || 17.x.x || ^18.0.0-0"
1725 | },
1726 | "peerDependenciesMeta": {
1727 | "@babel/core": {
1728 | "optional": true
1729 | },
1730 | "babel-plugin-macros": {
1731 | "optional": true
1732 | }
1733 | }
1734 | },
1735 | "node_modules/sucrase": {
1736 | "version": "3.34.0",
1737 | "resolved": "https://registry.npmjs.org/sucrase/-/sucrase-3.34.0.tgz",
1738 | "integrity": "sha512-70/LQEZ07TEcxiU2dz51FKaE6hCTWC6vr7FOk3Gr0U60C3shtAN+H+BFr9XlYe5xqf3RA8nrc+VIwzCfnxuXJw==",
1739 | "dependencies": {
1740 | "@jridgewell/gen-mapping": "^0.3.2",
1741 | "commander": "^4.0.0",
1742 | "glob": "7.1.6",
1743 | "lines-and-columns": "^1.1.6",
1744 | "mz": "^2.7.0",
1745 | "pirates": "^4.0.1",
1746 | "ts-interface-checker": "^0.1.9"
1747 | },
1748 | "bin": {
1749 | "sucrase": "bin/sucrase",
1750 | "sucrase-node": "bin/sucrase-node"
1751 | },
1752 | "engines": {
1753 | "node": ">=8"
1754 | }
1755 | },
1756 | "node_modules/supports-preserve-symlinks-flag": {
1757 | "version": "1.0.0",
1758 | "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz",
1759 | "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==",
1760 | "engines": {
1761 | "node": ">= 0.4"
1762 | },
1763 | "funding": {
1764 | "url": "https://github.com/sponsors/ljharb"
1765 | }
1766 | },
1767 | "node_modules/tabbable": {
1768 | "version": "6.2.0",
1769 | "resolved": "https://registry.npmjs.org/tabbable/-/tabbable-6.2.0.tgz",
1770 | "integrity": "sha512-Cat63mxsVJlzYvN51JmVXIgNoUokrIaT2zLclCXjRd8boZ0004U4KCs/sToJ75C6sdlByWxpYnb5Boif1VSFew=="
1771 | },
1772 | "node_modules/tailwind-merge": {
1773 | "version": "1.14.0",
1774 | "resolved": "https://registry.npmjs.org/tailwind-merge/-/tailwind-merge-1.14.0.tgz",
1775 | "integrity": "sha512-3mFKyCo/MBcgyOTlrY8T7odzZFx+w+qKSMAmdFzRvqBfLlSigU6TZnlFHK0lkMwj9Bj8OYU+9yW9lmGuS0QEnQ==",
1776 | "funding": {
1777 | "type": "github",
1778 | "url": "https://github.com/sponsors/dcastil"
1779 | }
1780 | },
1781 | "node_modules/tailwindcss": {
1782 | "version": "3.3.3",
1783 | "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.3.3.tgz",
1784 | "integrity": "sha512-A0KgSkef7eE4Mf+nKJ83i75TMyq8HqY3qmFIJSWy8bNt0v1lG7jUcpGpoTFxAwYcWOphcTBLPPJg+bDfhDf52w==",
1785 | "dependencies": {
1786 | "@alloc/quick-lru": "^5.2.0",
1787 | "arg": "^5.0.2",
1788 | "chokidar": "^3.5.3",
1789 | "didyoumean": "^1.2.2",
1790 | "dlv": "^1.1.3",
1791 | "fast-glob": "^3.2.12",
1792 | "glob-parent": "^6.0.2",
1793 | "is-glob": "^4.0.3",
1794 | "jiti": "^1.18.2",
1795 | "lilconfig": "^2.1.0",
1796 | "micromatch": "^4.0.5",
1797 | "normalize-path": "^3.0.0",
1798 | "object-hash": "^3.0.0",
1799 | "picocolors": "^1.0.0",
1800 | "postcss": "^8.4.23",
1801 | "postcss-import": "^15.1.0",
1802 | "postcss-js": "^4.0.1",
1803 | "postcss-load-config": "^4.0.1",
1804 | "postcss-nested": "^6.0.1",
1805 | "postcss-selector-parser": "^6.0.11",
1806 | "resolve": "^1.22.2",
1807 | "sucrase": "^3.32.0"
1808 | },
1809 | "bin": {
1810 | "tailwind": "lib/cli.js",
1811 | "tailwindcss": "lib/cli.js"
1812 | },
1813 | "engines": {
1814 | "node": ">=14.0.0"
1815 | }
1816 | },
1817 | "node_modules/thenify": {
1818 | "version": "3.3.1",
1819 | "resolved": "https://registry.npmjs.org/thenify/-/thenify-3.3.1.tgz",
1820 | "integrity": "sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==",
1821 | "dependencies": {
1822 | "any-promise": "^1.0.0"
1823 | }
1824 | },
1825 | "node_modules/thenify-all": {
1826 | "version": "1.6.0",
1827 | "resolved": "https://registry.npmjs.org/thenify-all/-/thenify-all-1.6.0.tgz",
1828 | "integrity": "sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==",
1829 | "dependencies": {
1830 | "thenify": ">= 3.1.0 < 4"
1831 | },
1832 | "engines": {
1833 | "node": ">=0.8"
1834 | }
1835 | },
1836 | "node_modules/to-regex-range": {
1837 | "version": "5.0.1",
1838 | "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
1839 | "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==",
1840 | "dependencies": {
1841 | "is-number": "^7.0.0"
1842 | },
1843 | "engines": {
1844 | "node": ">=8.0"
1845 | }
1846 | },
1847 | "node_modules/ts-interface-checker": {
1848 | "version": "0.1.13",
1849 | "resolved": "https://registry.npmjs.org/ts-interface-checker/-/ts-interface-checker-0.1.13.tgz",
1850 | "integrity": "sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA=="
1851 | },
1852 | "node_modules/tslib": {
1853 | "version": "2.6.1",
1854 | "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz",
1855 | "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig=="
1856 | },
1857 | "node_modules/update-browserslist-db": {
1858 | "version": "1.0.11",
1859 | "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.11.tgz",
1860 | "integrity": "sha512-dCwEFf0/oT85M1fHBg4F0jtLwJrutGoHSQXCh7u4o2t1drG+c0a9Flnqww6XUKSfQMPpJBRjU8d4RXB09qtvaA==",
1861 | "funding": [
1862 | {
1863 | "type": "opencollective",
1864 | "url": "https://opencollective.com/browserslist"
1865 | },
1866 | {
1867 | "type": "tidelift",
1868 | "url": "https://tidelift.com/funding/github/npm/browserslist"
1869 | },
1870 | {
1871 | "type": "github",
1872 | "url": "https://github.com/sponsors/ai"
1873 | }
1874 | ],
1875 | "dependencies": {
1876 | "escalade": "^3.1.1",
1877 | "picocolors": "^1.0.0"
1878 | },
1879 | "bin": {
1880 | "update-browserslist-db": "cli.js"
1881 | },
1882 | "peerDependencies": {
1883 | "browserslist": ">= 4.21.0"
1884 | }
1885 | },
1886 | "node_modules/util-deprecate": {
1887 | "version": "1.0.2",
1888 | "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
1889 | "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw=="
1890 | },
1891 | "node_modules/victory-vendor": {
1892 | "version": "36.6.11",
1893 | "resolved": "https://registry.npmjs.org/victory-vendor/-/victory-vendor-36.6.11.tgz",
1894 | "integrity": "sha512-nT8kCiJp8dQh8g991J/R5w5eE2KnO8EAIP0xocWlh9l2okngMWglOPoMZzJvek8Q1KUc4XE/mJxTZnvOB1sTYg==",
1895 | "dependencies": {
1896 | "@types/d3-array": "^3.0.3",
1897 | "@types/d3-ease": "^3.0.0",
1898 | "@types/d3-interpolate": "^3.0.1",
1899 | "@types/d3-scale": "^4.0.2",
1900 | "@types/d3-shape": "^3.1.0",
1901 | "@types/d3-time": "^3.0.0",
1902 | "@types/d3-timer": "^3.0.0",
1903 | "d3-array": "^3.1.6",
1904 | "d3-ease": "^3.0.1",
1905 | "d3-interpolate": "^3.0.1",
1906 | "d3-scale": "^4.0.2",
1907 | "d3-shape": "^3.1.0",
1908 | "d3-time": "^3.0.0",
1909 | "d3-timer": "^3.0.1"
1910 | }
1911 | },
1912 | "node_modules/watchpack": {
1913 | "version": "2.4.0",
1914 | "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.4.0.tgz",
1915 | "integrity": "sha512-Lcvm7MGST/4fup+ifyKi2hjyIAwcdI4HRgtvTpIUxBRhB+RFtUh8XtDOxUfctVCnhVi+QQj49i91OyvzkJl6cg==",
1916 | "dependencies": {
1917 | "glob-to-regexp": "^0.4.1",
1918 | "graceful-fs": "^4.1.2"
1919 | },
1920 | "engines": {
1921 | "node": ">=10.13.0"
1922 | }
1923 | },
1924 | "node_modules/wrappy": {
1925 | "version": "1.0.2",
1926 | "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
1927 | "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ=="
1928 | },
1929 | "node_modules/yaml": {
1930 | "version": "2.3.1",
1931 | "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.3.1.tgz",
1932 | "integrity": "sha512-2eHWfjaoXgTBC2jNM1LRef62VQa0umtvRiDSk6HSzW7RvS5YtkabJrwYLLEKWBc8a5U2PTSCs+dJjUTJdlHsWQ==",
1933 | "engines": {
1934 | "node": ">= 14"
1935 | }
1936 | },
1937 | "node_modules/zod": {
1938 | "version": "3.21.4",
1939 | "resolved": "https://registry.npmjs.org/zod/-/zod-3.21.4.tgz",
1940 | "integrity": "sha512-m46AKbrzKVzOzs/DZgVnG5H55N1sv1M8qZU3A8RIKbs3mrACDNeIOeilDymVb2HdmP8uwshOCF4uJ8uM9rCqJw==",
1941 | "funding": {
1942 | "url": "https://github.com/sponsors/colinhacks"
1943 | }
1944 | }
1945 | }
1946 | }
1947 |
--------------------------------------------------------------------------------