├── .cursor └── rules │ └── summary.mdc ├── .env.example ├── .eslintrc.cjs ├── .github ├── FUNDING.yml └── workflows │ └── deploy.yml ├── .gitignore ├── LICENSE ├── README.md ├── backend ├── Dockerfile ├── app │ ├── __init__.py │ ├── core │ │ └── limiter.py │ ├── main.py │ ├── prompts.py │ ├── routers │ │ ├── generate.py │ │ └── modify.py │ ├── services │ │ ├── claude_service.py │ │ ├── github_service.py │ │ ├── o1_mini_openai_service.py │ │ ├── o3_mini_openai_service.py │ │ ├── o3_mini_openrouter_service.py │ │ └── o4_mini_openai_service.py │ └── utils │ │ └── format_message.py ├── deploy.sh ├── entrypoint.sh ├── nginx │ ├── api.conf │ └── setup_nginx.sh └── requirements.txt ├── components.json ├── docker-compose.yml ├── docs └── readme_img.png ├── drizzle.config.ts ├── next.config.js ├── package.json ├── pnpm-lock.yaml ├── postcss.config.js ├── prettier.config.js ├── public ├── favicon.ico └── og-image.png ├── src ├── app │ ├── [username] │ │ └── [repo] │ │ │ └── page.tsx │ ├── _actions │ │ ├── cache.ts │ │ ├── github.ts │ │ └── repo.ts │ ├── layout.tsx │ ├── page.tsx │ └── providers.tsx ├── components │ ├── action-button.tsx │ ├── api-key-button.tsx │ ├── api-key-dialog.tsx │ ├── copy-button.tsx │ ├── customization-dropdown.tsx │ ├── export-dropdown.tsx │ ├── footer.tsx │ ├── header.tsx │ ├── hero.tsx │ ├── loading-animation.tsx │ ├── loading.tsx │ ├── main-card.tsx │ ├── mermaid-diagram.tsx │ ├── private-repos-dialog.tsx │ └── ui │ │ ├── button.tsx │ │ ├── card.tsx │ │ ├── dialog.tsx │ │ ├── input.tsx │ │ ├── progress.tsx │ │ ├── sonner.tsx │ │ ├── switch.tsx │ │ ├── textarea.tsx │ │ └── tooltip.tsx ├── env.js ├── hooks │ ├── useDiagram.ts │ └── useStarReminder.tsx ├── lib │ ├── exampleRepos.ts │ ├── fetch-backend.ts │ └── utils.ts ├── server │ └── db │ │ ├── index.ts │ │ └── schema.ts └── styles │ └── globals.css ├── start-database.sh ├── tailwind.config.ts └── tsconfig.json /.cursor/rules/summary.mdc: -------------------------------------------------------------------------------- 1 | --- 2 | description: summary of project 3 | globs: 4 | alwaysApply: true 5 | --- 6 | # GitDiagram Project Summary 7 | 8 | ## Project Overview 9 | This is GitDiagram, a web application that converts any GitHub repository structure into an interactive system design/architecture diagram for visualization. It allows users to quickly understand the architecture of any repository by generating visual diagrams, and provides interactivity by letting users click on components to navigate directly to source files and relevant directories. 10 | 11 | ## Key Features 12 | - Instant conversion of GitHub repositories into system design diagrams 13 | - Interactive components that link to source files and directories 14 | - Support for both public and private repositories (with GitHub token) 15 | - Customizable diagrams through user instructions 16 | - URL shortcut: replace `hub` with `diagram` in any GitHub URL to access its diagram 17 | 18 | ## Tech Stack 19 | - **Frontend**: Next.js 15, TypeScript, Tailwind CSS, ShadCN UI components 20 | - **Backend**: FastAPI (Python), Server Actions 21 | - **Database**: PostgreSQL with Drizzle ORM, Neon Database for serverless PostgreSQL 22 | - **AI**: Claude 3.5 Sonnet (previously) / OpenAI o3-mini (currently) for diagram generation 23 | - **Deployment**: Vercel (Frontend), EC2 (Backend) 24 | - **CI/CD**: GitHub Actions 25 | - **Analytics**: PostHog, Api-Analytics 26 | 27 | ## Architecture 28 | The project follows a modern full-stack architecture: 29 | 30 | 1. 
**Frontend (Next.js)**: 31 | - Organized using the App Router pattern 32 | - Uses server components and server actions 33 | - Implements Mermaid.js for rendering diagrams 34 | - Provides UI for repository input and diagram customization 35 | 36 | 2. **Backend (FastAPI)**: 37 | - Handles repository data extraction 38 | - Implements complex prompt engineering through a pipeline: 39 | - First prompt analyzes the repository and creates an explanation 40 | - Second prompt maps relevant directories and files to diagram components 41 | - Third prompt generates the final Mermaid.js code 42 | - Manages API rate limiting and authentication 43 | 44 | 3. **Database (PostgreSQL)**: 45 | - Stores user data, repository information, and generated diagrams 46 | - Uses Drizzle ORM for type-safe database operations 47 | 48 | 4. **AI Integration**: 49 | - Uses LLMs to analyze repository structure 50 | - Generates detailed diagrams based on file trees and README content 51 | - Implements sophisticated prompt engineering to extract accurate information 52 | 53 | ## Project Structure 54 | - `/src`: Frontend source code (Next.js) and server actions for db calls with drizzle 55 | - `/backend`: Python FastAPI backend 56 | - `/public`: Static assets 57 | - `/docs`: Documentation and images 58 | 59 | ## Development Setup 60 | The project supports both local development and self-hosting: 61 | - Dependencies managed with pnpm 62 | - Docker Compose for containerization 63 | - Environment configuration via .env files 64 | - Database initialization scripts 65 | 66 | ## Future Development 67 | - Implementation of font-awesome icons in diagrams 68 | - Embedded feature for progressive diagram updates as commits are made 69 | - Expanded API access for third-party integration -------------------------------------------------------------------------------- /.env.example: -------------------------------------------------------------------------------- 1 | POSTGRES_URL="postgresql://postgres:password@localhost:5432/gitdiagram" 2 | NEXT_PUBLIC_API_DEV_URL=http://localhost:8000 3 | 4 | OPENAI_API_KEY= 5 | 6 | # OPTIONAL: providing your own GitHub PAT increases rate limits from 60/hr to 5000/hr to the GitHub API 7 | GITHUB_PAT= 8 | 9 | # old implementation 10 | # OPENROUTER_API_KEY= 11 | # ANTHROPIC_API_KEY= -------------------------------------------------------------------------------- /.eslintrc.cjs: -------------------------------------------------------------------------------- 1 | /** @type {import("eslint").Linter.Config} */ 2 | const config = { 3 | "parser": "@typescript-eslint/parser", 4 | "parserOptions": { 5 | "project": true 6 | }, 7 | "plugins": [ 8 | "@typescript-eslint", 9 | "drizzle" 10 | ], 11 | "extends": [ 12 | "next/core-web-vitals", 13 | "plugin:@typescript-eslint/recommended-type-checked", 14 | "plugin:@typescript-eslint/stylistic-type-checked" 15 | ], 16 | "rules": { 17 | "@typescript-eslint/array-type": "off", 18 | "@typescript-eslint/consistent-type-definitions": "off", 19 | "@typescript-eslint/consistent-type-imports": [ 20 | "warn", 21 | { 22 | "prefer": "type-imports", 23 | "fixStyle": "inline-type-imports" 24 | } 25 | ], 26 | "@typescript-eslint/no-unused-vars": [ 27 | "warn", 28 | { 29 | "argsIgnorePattern": "^_" 30 | } 31 | ], 32 | "@typescript-eslint/require-await": "off", 33 | "@typescript-eslint/no-misused-promises": [ 34 | "error", 35 | { 36 | "checksVoidReturn": { 37 | "attributes": false 38 | } 39 | } 40 | ], 41 | "drizzle/enforce-delete-with-where": [ 42 | "error", 43 | { 44 | 
"drizzleObjectName": [ 45 | "db", 46 | "ctx.db" 47 | ] 48 | } 49 | ], 50 | "drizzle/enforce-update-with-where": [ 51 | "error", 52 | { 53 | "drizzleObjectName": [ 54 | "db", 55 | "ctx.db" 56 | ] 57 | } 58 | ] 59 | } 60 | } 61 | module.exports = config; -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2] 4 | patreon: # Replace with a single Patreon username 5 | open_collective: # Replace with a single Open Collective username 6 | ko_fi: ahmedkhaleel2004 7 | tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel 8 | community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry 9 | liberapay: # Replace with a single Liberapay username 10 | issuehunt: # Replace with a single IssueHunt username 11 | lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry 12 | polar: # Replace with a single Polar username 13 | buy_me_a_coffee: # Replace with a single Buy Me a Coffee username 14 | thanks_dev: # Replace with a single thanks.dev username 15 | custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2'] 16 | -------------------------------------------------------------------------------- /.github/workflows/deploy.yml: -------------------------------------------------------------------------------- 1 | name: Deploy to EC2 2 | 3 | on: 4 | push: 5 | branches: [main] 6 | paths: 7 | - "backend/**" # Only trigger on backend changes 8 | - ".github/workflows/**" 9 | 10 | jobs: 11 | deploy: 12 | runs-on: ubuntu-latest 13 | 14 | # Add concurrency to prevent multiple deployments running at once 15 | concurrency: 16 | group: production 17 | cancel-in-progress: true 18 | 19 | steps: 20 | - uses: actions/checkout@v4 21 | 22 | - name: Deploy to EC2 23 | uses: appleboy/ssh-action@master 24 | with: 25 | host: ${{ secrets.EC2_HOST }} 26 | username: ubuntu 27 | key: ${{ secrets.EC2_SSH_KEY }} 28 | script: | 29 | cd ~/gitdiagram 30 | git fetch origin main 31 | git reset --hard origin/main # Force local to match remote main 32 | sudo chmod +x ./backend/nginx/setup_nginx.sh 33 | sudo ./backend/nginx/setup_nginx.sh 34 | chmod +x ./backend/deploy.sh 35 | ./backend/deploy.sh 36 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files. 2 | 3 | # dependencies 4 | /node_modules 5 | /.pnp 6 | .pnp.js 7 | 8 | # testing 9 | /coverage 10 | 11 | # database 12 | /prisma/db.sqlite 13 | /prisma/db.sqlite-journal 14 | db.sqlite 15 | 16 | # next.js 17 | /.next/ 18 | /out/ 19 | next-env.d.ts 20 | 21 | # production 22 | /build 23 | 24 | # misc 25 | .DS_Store 26 | *.pem 27 | 28 | # debug 29 | npm-debug.log* 30 | yarn-debug.log* 31 | yarn-error.log* 32 | .pnpm-debug.log* 33 | 34 | # local env files 35 | # do not commit any .env files to git, except for the .env.example file. 
https://create.t3.gg/en/usage/env-variables#using-environment-variables 36 | .env 37 | .env*.local 38 | .env-e 39 | 40 | # vercel 41 | .vercel 42 | 43 | # typescript 44 | *.tsbuildinfo 45 | 46 | # idea files 47 | .idea 48 | 49 | __pycache__/ 50 | venv 51 | 52 | # vscode 53 | .vscode/ -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Ahmed Khaleel 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Image](./docs/readme_img.png "GitDiagram Front Page")](https://gitdiagram.com/) 2 | 3 | ![License](https://img.shields.io/badge/license-MIT-blue.svg) 4 | [![Kofi](https://img.shields.io/badge/Kofi-F16061.svg?logo=ko-fi&logoColor=white)](https://ko-fi.com/ahmedkhaleel2004) 5 | 6 | # GitDiagram 7 | 8 | Turn any GitHub repository into an interactive diagram for visualization in seconds. 9 | 10 | You can also replace `hub` with `diagram` in any Github URL to access its diagram. 11 | 12 | ## 🚀 Features 13 | 14 | - 👀 **Instant Visualization**: Convert any GitHub repository structure into a system design / architecture diagram 15 | - 🎨 **Interactivity**: Click on components to navigate directly to source files and relevant directories 16 | - ⚡ **Fast Generation**: Powered by OpenAI o4-mini for quick and accurate diagrams 17 | - 🔄 **Customization**: Modify and regenerate diagrams with custom instructions 18 | - 🌐 **API Access**: Public API available for integration (WIP) 19 | 20 | ## ⚙️ Tech Stack 21 | 22 | - **Frontend**: Next.js, TypeScript, Tailwind CSS, ShadCN 23 | - **Backend**: FastAPI, Python, Server Actions 24 | - **Database**: PostgreSQL (with Drizzle ORM) 25 | - **AI**: OpenAI o4-mini 26 | - **Deployment**: Vercel (Frontend), EC2 (Backend) 27 | - **CI/CD**: GitHub Actions 28 | - **Analytics**: PostHog, Api-Analytics 29 | 30 | ## 🤔 About 31 | 32 | I created this because I wanted to contribute to open-source projects but quickly realized their codebases are too massive for me to dig through manually, so this helps me get started - but it's definitely got many more use cases! 33 | 34 | Given any public (or private!) GitHub repository it generates diagrams in Mermaid.js with OpenAI's o4-mini! 
(Previously Claude 3.5 Sonnet) 35 | 36 | I extract information from the file tree and README for details and interactivity (you can click components to be taken to relevant files and directories) 37 | 38 | Most of what you might call the "processing" of this app is done with prompt engineering - see `/backend/app/prompts.py`. This basically extracts and pipelines data and analysis for a larger action workflow, ending in the diagram code. 39 | 40 | ## 🔒 How to diagram private repositories 41 | 42 | You can simply click on "Private Repos" in the header and follow the instructions by providing a GitHub personal access token with the `repo` scope. 43 | 44 | You can also self-host this app locally (backend separated as well!) with the steps below. 45 | 46 | ## 🛠️ Self-hosting / Local Development 47 | 48 | 1. Clone the repository 49 | 50 | ```bash 51 | git clone https://github.com/ahmedkhaleel2004/gitdiagram.git 52 | cd gitdiagram 53 | ``` 54 | 55 | 2. Install dependencies 56 | 57 | ```bash 58 | pnpm i 59 | ``` 60 | 61 | 3. Set up environment variables (create .env) 62 | 63 | ```bash 64 | cp .env.example .env 65 | ``` 66 | 67 | Then edit the `.env` file with your Anthropic API key and optional GitHub personal access token. 68 | 69 | 4. Run backend 70 | 71 | ```bash 72 | docker-compose up --build -d 73 | ``` 74 | 75 | Logs available at `docker-compose logs -f` 76 | The FastAPI server will be available at `localhost:8000` 77 | 78 | 5. Start local database 79 | 80 | ```bash 81 | chmod +x start-database.sh 82 | ./start-database.sh 83 | ``` 84 | 85 | When prompted to generate a random password, input yes. 86 | The Postgres database will start in a container at `localhost:5432` 87 | 88 | 6. Initialize the database schema 89 | 90 | ```bash 91 | pnpm db:push 92 | ``` 93 | 94 | You can view and interact with the database using `pnpm db:studio` 95 | 96 | 7. Run Frontend 97 | 98 | ```bash 99 | pnpm dev 100 | ``` 101 | 102 | You can now access the website at `localhost:3000` and edit the rate limits defined in `backend/app/routers/generate.py` in the generate function decorator. 103 | 104 | ## Contributing 105 | 106 | Contributions are welcome! Please feel free to submit a Pull Request. 107 | 108 | ## Acknowledgements 109 | 110 | Shoutout to [Romain Courtois](https://github.com/cyclotruc)'s [Gitingest](https://gitingest.com/) for inspiration and styling 111 | 112 | ## 📈 Rate Limits 113 | 114 | I am currently hosting it for free with no rate limits though this is somewhat likely to change in the future. 115 | 116 | 122 | 123 | ## 🤔 Future Steps 124 | 125 | - Implement font-awesome icons in diagram 126 | - Implement an embedded feature like star-history.com but for diagrams. The diagram could also be updated progressively as commits are made. 127 | -------------------------------------------------------------------------------- /backend/Dockerfile: -------------------------------------------------------------------------------- 1 | # Use Python 3.12 slim image for smaller size 2 | FROM python:3.12-slim 3 | 4 | # Set working directory 5 | WORKDIR /app 6 | 7 | # Copy requirements first to leverage Docker cache 8 | COPY requirements.txt . 9 | 10 | # Install dependencies 11 | RUN pip install --no-cache-dir -r requirements.txt 12 | 13 | # Copy application code 14 | COPY . . 
15 | 16 | # Create and set permissions for entrypoint script 17 | COPY entrypoint.sh /app/ 18 | RUN chmod +x /app/entrypoint.sh && \ 19 | # Ensure the script uses Unix line endings 20 | sed -i 's/\r$//' /app/entrypoint.sh && \ 21 | # Double check permissions 22 | ls -la /app/entrypoint.sh 23 | 24 | # Expose port 25 | EXPOSE 8000 26 | 27 | # Use entrypoint script 28 | CMD ["/bin/bash", "/app/entrypoint.sh"] 29 | -------------------------------------------------------------------------------- /backend/app/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ahmedkhaleel2004/gitdiagram/9236492a681b221aff38e3dba9752af83d7823aa/backend/app/__init__.py -------------------------------------------------------------------------------- /backend/app/core/limiter.py: -------------------------------------------------------------------------------- 1 | from slowapi import Limiter 2 | from slowapi.util import get_remote_address 3 | 4 | limiter = Limiter(key_func=get_remote_address) 5 | -------------------------------------------------------------------------------- /backend/app/main.py: -------------------------------------------------------------------------------- 1 | from fastapi import FastAPI, Request 2 | from fastapi.middleware.cors import CORSMiddleware 3 | from slowapi import _rate_limit_exceeded_handler 4 | from slowapi.errors import RateLimitExceeded 5 | from app.routers import generate, modify 6 | from app.core.limiter import limiter 7 | from typing import cast 8 | from starlette.exceptions import ExceptionMiddleware 9 | from api_analytics.fastapi import Analytics 10 | import os 11 | 12 | 13 | app = FastAPI() 14 | 15 | 16 | origins = ["http://localhost:3000", "https://gitdiagram.com"] 17 | 18 | app.add_middleware( 19 | CORSMiddleware, 20 | allow_origins=origins, 21 | allow_credentials=True, 22 | allow_methods=["GET", "POST"], 23 | allow_headers=["*"], 24 | ) 25 | 26 | API_ANALYTICS_KEY = os.getenv("API_ANALYTICS_KEY") 27 | if API_ANALYTICS_KEY: 28 | app.add_middleware(Analytics, api_key=API_ANALYTICS_KEY) 29 | 30 | app.state.limiter = limiter 31 | app.add_exception_handler( 32 | RateLimitExceeded, cast(ExceptionMiddleware, _rate_limit_exceeded_handler) 33 | ) 34 | 35 | app.include_router(generate.router) 36 | app.include_router(modify.router) 37 | 38 | 39 | @app.get("/") 40 | # @limiter.limit("100/day") 41 | async def root(request: Request): 42 | return {"message": "Hello from GitDiagram API!"} 43 | -------------------------------------------------------------------------------- /backend/app/prompts.py: -------------------------------------------------------------------------------- 1 | # This is our processing. This is where GitDiagram makes the magic happen 2 | # There is a lot of DETAIL we need to extract from the repository to produce detailed and accurate diagrams 3 | # I will immediately put out there that I'm trying to reduce costs. Theoretically, I could, for like 5x better accuracy, include most file content as well which would make for perfect diagrams, but thats too many tokens for my wallet, and would probably greatly increase generation time. (maybe a paid feature?) 
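# (For scale, and based on the limits in backend/app/routers/generate.py: the combined file tree + README is soft-capped at ~50k tokens without a user-supplied API key and hard-capped at ~195k, so pulling in full file contents would likely blow well past those limits for most non-trivial repos -- hence tree + README only, for now.)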
4 | 5 | # THE PROCESS: 6 | 7 | # imagine it like this: 8 | # def prompt1(file_tree, readme) -> explanation of diagram 9 | # def prompt2(explanation, file_tree) -> maps relevant directories and files to parts of diagram for interactivity 10 | # def prompt3(explanation, map) -> Mermaid.js code 11 | 12 | # Note: Originally prompt1 and prompt2 were combined - but I tested it, and turns out mapping relevant dirs and files in one prompt along with generating detailed and accurate diagrams was difficult for Claude 3.5 Sonnet. It lost detail in the explanation and dedicated more "effort" to the mappings, so this is now its own prompt. 13 | 14 | # This is my first take at prompt engineering so if you have any ideas on optimizations please make an issue on the GitHub! 15 | 16 | SYSTEM_FIRST_PROMPT = """ 17 | You are tasked with explaining to a principal software engineer how to draw the best and most accurate system design diagram / architecture of a given project. This explanation should be tailored to the specific project's purpose and structure. To accomplish this, you will be provided with two key pieces of information: 18 | 19 | 1. The complete and entire file tree of the project including all directory and file names, which will be enclosed in tags in the users message. 20 | 21 | 2. The README file of the project, which will be enclosed in tags in the users message. 22 | 23 | Analyze these components carefully, as they will provide crucial information about the project's structure and purpose. Follow these steps to create an explanation for the principal software engineer: 24 | 25 | 1. Identify the project type and purpose: 26 | - Examine the file structure and README to determine if the project is a full-stack application, an open-source tool, a compiler, or another type of software imaginable. 27 | - Look for key indicators in the README, such as project description, features, or use cases. 28 | 29 | 2. Analyze the file structure: 30 | - Pay attention to top-level directories and their names (e.g., "frontend", "backend", "src", "lib", "tests"). 31 | - Identify patterns in the directory structure that might indicate architectural choices (e.g., MVC pattern, microservices). 32 | - Note any configuration files, build scripts, or deployment-related files. 33 | 34 | 3. Examine the README for additional insights: 35 | - Look for sections describing the architecture, dependencies, or technical stack. 36 | - Check for any diagrams or explanations of the system's components. 37 | 38 | 4. Based on your analysis, explain how to create a system design diagram that accurately represents the project's architecture. Include the following points: 39 | 40 | a. Identify the main components of the system (e.g., frontend, backend, database, building, external services). 41 | b. Determine the relationships and interactions between these components. 42 | c. Highlight any important architectural patterns or design principles used in the project. 43 | d. Include relevant technologies, frameworks, or libraries that play a significant role in the system's architecture. 44 | 45 | 5. Provide guidelines for tailoring the diagram to the specific project type: 46 | - For a full-stack application, emphasize the separation between frontend and backend, database interactions, and any API layers. 47 | - For an open-source tool, focus on the core functionality, extensibility points, and how it integrates with other systems. 
48 | - For a compiler or language-related project, highlight the different stages of compilation or interpretation, and any intermediate representations. 49 | 50 | 6. Instruct the principal software engineer to include the following elements in the diagram: 51 | - Clear labels for each component 52 | - Directional arrows to show data flow or dependencies 53 | - Color coding or shapes to distinguish between different types of components 54 | 55 | 7. NOTE: Emphasize the importance of being very detailed and capturing the essential architectural elements. Don't overthink it too much, simply separating the project into as many components as possible is best. 56 | 57 | Present your explanation and instructions within tags, ensuring that you tailor your advice to the specific project based on the provided file tree and README content. 58 | """ 59 | 60 | # - A legend explaining any symbols or abbreviations used 61 | # ^ removed since it was making the diagrams very long 62 | 63 | # just adding some clear separation between the prompts 64 | # ************************************************************ 65 | # ************************************************************ 66 | 67 | SYSTEM_SECOND_PROMPT = """ 68 | You are tasked with mapping key components of a system design to their corresponding files and directories in a project's file structure. You will be provided with a detailed explanation of the system design/architecture and a file tree of the project. 69 | 70 | First, carefully read the system design explanation which will be enclosed in tags in the users message. 71 | 72 | Then, examine the file tree of the project which will be enclosed in tags in the users message. 73 | 74 | Your task is to analyze the system design explanation and identify key components, modules, or services mentioned. Then, try your best to map these components to what you believe could be their corresponding directories and files in the provided file tree. 75 | 76 | Guidelines: 77 | 1. Focus on major components described in the system design. 78 | 2. Look for directories and files that clearly correspond to these components. 79 | 3. Include both directories and specific files when relevant. 80 | 4. If a component doesn't have a clear corresponding file or directory, simply dont include it in the map. 81 | 82 | Now, provide your final answer in the following format: 83 | 84 | 85 | 1. [Component Name]: [File/Directory Path] 86 | 2. [Component Name]: [File/Directory Path] 87 | [Continue for all identified components] 88 | 89 | 90 | Remember to be as specific as possible in your mappings, only use what is given to you from the file tree, and to strictly follow the components mentioned in the explanation. 91 | """ 92 | 93 | # ❌ BELOW IS A REMOVED SECTION FROM THE ABOVE PROMPT USED FOR CLAUDE 3.5 SONNET 94 | # Before providing your final answer, use the to think through your process: 95 | # 1. List the key components identified in the system design. 96 | # 2. For each component, brainstorm potential corresponding directories or files. 97 | # 3. Verify your mappings by double-checking the file tree. 
98 | 99 | # 100 | # [Your thought process here] 101 | # 102 | 103 | # just adding some clear separation between the prompts 104 | # ************************************************************ 105 | # ************************************************************ 106 | 107 | SYSTEM_THIRD_PROMPT = """ 108 | You are a principal software engineer tasked with creating a system design diagram using Mermaid.js based on a detailed explanation. Your goal is to accurately represent the architecture and design of the project as described in the explanation. 109 | 110 | The detailed explanation of the design will be enclosed in tags in the users message. 111 | 112 | Also, sourced from the explanation, as a bonus, a few of the identified components have been mapped to their paths in the project file tree, whether it is a directory or file which will be enclosed in tags in the users message. 113 | 114 | To create the Mermaid.js diagram: 115 | 116 | 1. Carefully read and analyze the provided design explanation. 117 | 2. Identify the main components, services, and their relationships within the system. 118 | 3. Determine the appropriate Mermaid.js diagram type to use (e.g., flowchart, sequence diagram, class diagram, architecture, etc.) based on the nature of the system described. 119 | 4. Create the Mermaid.js code to represent the design, ensuring that: 120 | a. All major components are included 121 | b. Relationships between components are clearly shown 122 | c. The diagram accurately reflects the architecture described in the explanation 123 | d. The layout is logical and easy to understand 124 | 125 | Guidelines for diagram components and relationships: 126 | - Use appropriate shapes for different types of components (e.g., rectangles for services, cylinders for databases, etc.) 127 | - Use clear and concise labels for each component 128 | - Show the direction of data flow or dependencies using arrows 129 | - Group related components together if applicable 130 | - Include any important notes or annotations mentioned in the explanation 131 | - Just follow the explanation. It will have everything you need. 132 | 133 | IMPORTANT!!: Please orient and draw the diagram as vertically as possible. You must avoid long horizontal lists of nodes and sections! 134 | 135 | You must include click events for components of the diagram that have been specified in the provided : 136 | - Do not try to include the full url. This will be processed by another program afterwards. All you need to do is include the path. 137 | - For example: 138 | - This is a correct click event: `click Example "app/example.js"` 139 | - This is an incorrect click event: `click Example "https://github.com/username/repo/blob/main/app/example.js"` 140 | - Do this for as many components as specified in the component mapping, include directories and files. 141 | - If you believe the component contains files and is a directory, include the directory path. 142 | - If you believe the component references a specific file, include the file path. 143 | - Make sure to include the full path to the directory or file exactly as specified in the component mapping. 144 | - It is very important that you do this for as many files as possible. The more the better. 145 | 146 | - IMPORTANT: THESE PATHS ARE FOR CLICK EVENTS ONLY, these paths should not be included in the diagram's node's names. Only for the click events. Paths should not be seen by the user. 147 | 148 | Your output should be valid Mermaid.js code that can be rendered into a diagram. 
149 | 150 | Do not include an init declaration such as `%%{init: {'key':'etc'}}%%`. This is handled externally. Just return the diagram code. 151 | 152 | Your response must strictly be just the Mermaid.js code, without any additional text or explanations. 153 | No code fence or markdown ticks needed, simply return the Mermaid.js code. 154 | 155 | Ensure that your diagram adheres strictly to the given explanation, without adding or omitting any significant components or relationships. 156 | 157 | For general direction, the provided example below is how you should structure your code: 158 | 159 | ```mermaid 160 | flowchart TD 161 | %% or graph TD, your choice 162 | 163 | %% Global entities 164 | A("Entity A"):::external 165 | %% more... 166 | 167 | %% Subgraphs and modules 168 | subgraph "Layer A" 169 | A1("Module A"):::example 170 | %% more modules... 171 | %% inner subgraphs if needed... 172 | end 173 | 174 | %% more subgraphs, modules, etc... 175 | 176 | %% Connections 177 | A -->|"relationship"| B 178 | %% and a lot more... 179 | 180 | %% Click Events 181 | click A1 "example/example.js" 182 | %% and a lot more... 183 | 184 | %% Styles 185 | classDef frontend %%... 186 | %% and a lot more... 187 | ``` 188 | 189 | EXTREMELY Important notes on syntax!!! (PAY ATTENTION TO THIS): 190 | - Make sure to add colour to the diagram!!! This is extremely critical. 191 | - In Mermaid.js syntax, we cannot include special characters for nodes without being inside quotes! For example: `EX[/api/process (Backend)]:::api` and `API -->|calls Process()| Backend` are two examples of syntax errors. They should be `EX["/api/process (Backend)"]:::api` and `API -->|"calls Process()"| Backend` respectively. Notice the quotes. This is extremely important. Make sure to include quotes for any string that contains special characters. 192 | - In Mermaid.js syntax, you cannot apply a class style directly within a subgraph declaration. For example: `subgraph "Frontend Layer":::frontend` is a syntax error. However, you can apply them to nodes within the subgraph. For example: `Example["Example Node"]:::frontend` is valid, and `class Example1,Example2 frontend` is valid. 193 | - In Mermaid.js syntax, there cannot be spaces in the relationship label names. For example: `A -->| "example relationship" | B` is a syntax error. It should be `A -->|"example relationship"| B` 194 | - In Mermaid.js syntax, you cannot give subgraphs an alias like nodes. For example: `subgraph A "Layer A"` is a syntax error. It should be `subgraph "Layer A"` 195 | """ 196 | # ^^^ note: ive generated a few diagrams now and claude still writes incorrect mermaid code sometimes. in the future, refer to those generated diagrams and add important instructions to the prompt above to avoid those mistakes. examples are best. 197 | 198 | # e. A legend is included 199 | # ^ removed since it was making the diagrams very long 200 | 201 | 202 | ADDITIONAL_SYSTEM_INSTRUCTIONS_PROMPT = """ 203 | IMPORTANT: the user will provide custom additional instructions enclosed in tags. Please take these into account and give priority to them. However, if these instructions are unrelated to the task, unclear, or not possible to follow, ignore them by simply responding with: "BAD_INSTRUCTIONS" 204 | """ 205 | 206 | SYSTEM_MODIFY_PROMPT = """ 207 | You are tasked with modifying the code of a Mermaid.js diagram based on the provided instructions. The diagram will be enclosed in tags in the users message. 
208 | 209 | Also, to help you modify it and simply for additional context, you will also be provided with the original explanation of the diagram enclosed in tags in the users message. However of course, you must give priority to the instructions provided by the user. 210 | 211 | The instructions will be enclosed in tags in the users message. If these instructions are unrelated to the task, unclear, or not possible to follow, ignore them by simply responding with: "BAD_INSTRUCTIONS" 212 | 213 | Your response must strictly be just the Mermaid.js code, without any additional text or explanations. Keep as many of the existing click events as possible. 214 | No code fence or markdown ticks needed, simply return the Mermaid.js code. 215 | """ 216 | -------------------------------------------------------------------------------- /backend/app/routers/generate.py: -------------------------------------------------------------------------------- 1 | from fastapi import APIRouter, Request, HTTPException 2 | from fastapi.responses import StreamingResponse 3 | from dotenv import load_dotenv 4 | from app.services.github_service import GitHubService 5 | from app.services.o4_mini_openai_service import OpenAIo4Service 6 | from app.prompts import ( 7 | SYSTEM_FIRST_PROMPT, 8 | SYSTEM_SECOND_PROMPT, 9 | SYSTEM_THIRD_PROMPT, 10 | ADDITIONAL_SYSTEM_INSTRUCTIONS_PROMPT, 11 | ) 12 | from anthropic._exceptions import RateLimitError 13 | from pydantic import BaseModel 14 | from functools import lru_cache 15 | import re 16 | import json 17 | import asyncio 18 | 19 | # from app.services.claude_service import ClaudeService 20 | # from app.core.limiter import limiter 21 | 22 | load_dotenv() 23 | 24 | router = APIRouter(prefix="/generate", tags=["OpenAI o4-mini"]) 25 | 26 | # Initialize services 27 | # claude_service = ClaudeService() 28 | o4_service = OpenAIo4Service() 29 | 30 | 31 | # cache github data to avoid double API calls from cost and generate 32 | @lru_cache(maxsize=100) 33 | def get_cached_github_data(username: str, repo: str, github_pat: str | None = None): 34 | # Create a new service instance for each call with the appropriate PAT 35 | current_github_service = GitHubService(pat=github_pat) 36 | 37 | default_branch = current_github_service.get_default_branch(username, repo) 38 | if not default_branch: 39 | default_branch = "main" # fallback value 40 | 41 | file_tree = current_github_service.get_github_file_paths_as_list(username, repo) 42 | readme = current_github_service.get_github_readme(username, repo) 43 | 44 | return {"default_branch": default_branch, "file_tree": file_tree, "readme": readme} 45 | 46 | 47 | class ApiRequest(BaseModel): 48 | username: str 49 | repo: str 50 | instructions: str = "" 51 | api_key: str | None = None 52 | github_pat: str | None = None 53 | 54 | 55 | @router.post("/cost") 56 | # @limiter.limit("5/minute") # TEMP: disable rate limit for growth?? 
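# Rough worked example of the estimate computed below, using the o4-mini pricing of $1.1/M input and $4.4/M output tokens (repo sizes are illustrative): a ~20k-token file tree plus a ~5k-token README gives (20_000 * 2 + 5_000 + 3_000) * 0.0000011 + 8_000 * 0.0000044, i.e. roughly $0.09 per generation.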
57 | async def get_generation_cost(request: Request, body: ApiRequest): 58 | try: 59 | # Get file tree and README content 60 | github_data = get_cached_github_data(body.username, body.repo, body.github_pat) 61 | file_tree = github_data["file_tree"] 62 | readme = github_data["readme"] 63 | 64 | # Calculate combined token count 65 | # file_tree_tokens = claude_service.count_tokens(file_tree) 66 | # readme_tokens = claude_service.count_tokens(readme) 67 | 68 | file_tree_tokens = o4_service.count_tokens(file_tree) 69 | readme_tokens = o4_service.count_tokens(readme) 70 | 71 | # CLAUDE: Calculate approximate cost 72 | # Input cost: $3 per 1M tokens ($0.000003 per token) 73 | # Output cost: $15 per 1M tokens ($0.000015 per token) 74 | # input_cost = ((file_tree_tokens * 2 + readme_tokens) + 3000) * 0.000003 75 | # output_cost = 3500 * 0.000015 76 | # estimated_cost = input_cost + output_cost 77 | 78 | # Input cost: $1.1 per 1M tokens ($0.0000011 per token) 79 | # Output cost: $4.4 per 1M tokens ($0.0000044 per token) 80 | input_cost = ((file_tree_tokens * 2 + readme_tokens) + 3000) * 0.0000011 81 | output_cost = ( 82 | 8000 * 0.0000044 83 | ) # 8k just based on what I've seen (reasoning is expensive) 84 | estimated_cost = input_cost + output_cost 85 | 86 | # Format as currency string 87 | cost_string = f"${estimated_cost:.2f} USD" 88 | return {"cost": cost_string} 89 | except Exception as e: 90 | return {"error": str(e)} 91 | 92 | 93 | def process_click_events(diagram: str, username: str, repo: str, branch: str) -> str: 94 | """ 95 | Process click events in Mermaid diagram to include full GitHub URLs. 96 | Detects if path is file or directory and uses appropriate URL format. 97 | """ 98 | 99 | def replace_path(match): 100 | # Extract the path from the click event 101 | path = match.group(2).strip("\"'") 102 | 103 | # Determine if path is likely a file (has extension) or directory 104 | is_file = "." 
in path.split("/")[-1] 105 | 106 | # Construct GitHub URL 107 | base_url = f"https://github.com/{username}/{repo}" 108 | path_type = "blob" if is_file else "tree" 109 | full_url = f"{base_url}/{path_type}/{branch}/{path}" 110 | 111 | # Return the full click event with the new URL 112 | return f'click {match.group(1)} "{full_url}"' 113 | 114 | # Match click events: click ComponentName "path/to/something" 115 | click_pattern = r'click ([^\s"]+)\s+"([^"]+)"' 116 | return re.sub(click_pattern, replace_path, diagram) 117 | 118 | 119 | @router.post("/stream") 120 | async def generate_stream(request: Request, body: ApiRequest): 121 | try: 122 | # Initial validation checks 123 | if len(body.instructions) > 1000: 124 | return {"error": "Instructions exceed maximum length of 1000 characters"} 125 | 126 | if body.repo in [ 127 | "fastapi", 128 | "streamlit", 129 | "flask", 130 | "api-analytics", 131 | "monkeytype", 132 | ]: 133 | return {"error": "Example repos cannot be regenerated"} 134 | 135 | async def event_generator(): 136 | try: 137 | # Get cached github data 138 | github_data = get_cached_github_data( 139 | body.username, body.repo, body.github_pat 140 | ) 141 | default_branch = github_data["default_branch"] 142 | file_tree = github_data["file_tree"] 143 | readme = github_data["readme"] 144 | 145 | # Send initial status 146 | yield f"data: {json.dumps({'status': 'started', 'message': 'Starting generation process...'})}\n\n" 147 | await asyncio.sleep(0.1) 148 | 149 | # Token count check 150 | combined_content = f"{file_tree}\n{readme}" 151 | token_count = o4_service.count_tokens(combined_content) 152 | 153 | if 50000 < token_count < 195000 and not body.api_key: 154 | yield f"data: {json.dumps({'error': f'File tree and README combined exceeds token limit (50,000). Current size: {token_count} tokens. This GitHub repository is too large for my wallet, but you can continue by providing your own OpenAI API key.'})}\n\n" 155 | return 156 | elif token_count > 195000: 157 | yield f"data: {json.dumps({'error': f'Repository is too large (>195k tokens) for analysis. OpenAI o4-mini\'s max context length is 200k tokens. 
Current size: {token_count} tokens.'})}\n\n" 158 | return 159 | 160 | # Prepare prompts 161 | first_system_prompt = SYSTEM_FIRST_PROMPT 162 | third_system_prompt = SYSTEM_THIRD_PROMPT 163 | if body.instructions: 164 | first_system_prompt = ( 165 | first_system_prompt 166 | + "\n" 167 | + ADDITIONAL_SYSTEM_INSTRUCTIONS_PROMPT 168 | ) 169 | third_system_prompt = ( 170 | third_system_prompt 171 | + "\n" 172 | + ADDITIONAL_SYSTEM_INSTRUCTIONS_PROMPT 173 | ) 174 | 175 | # Phase 1: Get explanation 176 | yield f"data: {json.dumps({'status': 'explanation_sent', 'message': 'Sending explanation request to o4-mini...'})}\n\n" 177 | await asyncio.sleep(0.1) 178 | yield f"data: {json.dumps({'status': 'explanation', 'message': 'Analyzing repository structure...'})}\n\n" 179 | explanation = "" 180 | async for chunk in o4_service.call_o4_api_stream( 181 | system_prompt=first_system_prompt, 182 | data={ 183 | "file_tree": file_tree, 184 | "readme": readme, 185 | "instructions": body.instructions, 186 | }, 187 | api_key=body.api_key, 188 | reasoning_effort="medium", 189 | ): 190 | explanation += chunk 191 | yield f"data: {json.dumps({'status': 'explanation_chunk', 'chunk': chunk})}\n\n" 192 | 193 | if "BAD_INSTRUCTIONS" in explanation: 194 | yield f"data: {json.dumps({'error': 'Invalid or unclear instructions provided'})}\n\n" 195 | return 196 | 197 | # Phase 2: Get component mapping 198 | yield f"data: {json.dumps({'status': 'mapping_sent', 'message': 'Sending component mapping request to o4-mini...'})}\n\n" 199 | await asyncio.sleep(0.1) 200 | yield f"data: {json.dumps({'status': 'mapping', 'message': 'Creating component mapping...'})}\n\n" 201 | full_second_response = "" 202 | async for chunk in o4_service.call_o4_api_stream( 203 | system_prompt=SYSTEM_SECOND_PROMPT, 204 | data={"explanation": explanation, "file_tree": file_tree}, 205 | api_key=body.api_key, 206 | reasoning_effort="low", 207 | ): 208 | full_second_response += chunk 209 | yield f"data: {json.dumps({'status': 'mapping_chunk', 'chunk': chunk})}\n\n" 210 | 211 | # i dont think i need this anymore? 
but keep it here for now 212 | # Extract component mapping 213 | start_tag = "" 214 | end_tag = "" 215 | component_mapping_text = full_second_response[ 216 | full_second_response.find(start_tag) : full_second_response.find( 217 | end_tag 218 | ) 219 | ] 220 | 221 | # Phase 3: Generate Mermaid diagram 222 | yield f"data: {json.dumps({'status': 'diagram_sent', 'message': 'Sending diagram generation request to o4-mini...'})}\n\n" 223 | await asyncio.sleep(0.1) 224 | yield f"data: {json.dumps({'status': 'diagram', 'message': 'Generating diagram...'})}\n\n" 225 | mermaid_code = "" 226 | async for chunk in o4_service.call_o4_api_stream( 227 | system_prompt=third_system_prompt, 228 | data={ 229 | "explanation": explanation, 230 | "component_mapping": component_mapping_text, 231 | "instructions": body.instructions, 232 | }, 233 | api_key=body.api_key, 234 | reasoning_effort="low", 235 | ): 236 | mermaid_code += chunk 237 | yield f"data: {json.dumps({'status': 'diagram_chunk', 'chunk': chunk})}\n\n" 238 | 239 | # Process final diagram 240 | mermaid_code = mermaid_code.replace("```mermaid", "").replace("```", "") 241 | if "BAD_INSTRUCTIONS" in mermaid_code: 242 | yield f"data: {json.dumps({'error': 'Invalid or unclear instructions provided'})}\n\n" 243 | return 244 | 245 | processed_diagram = process_click_events( 246 | mermaid_code, body.username, body.repo, default_branch 247 | ) 248 | 249 | # Send final result 250 | yield f"data: {json.dumps({ 251 | 'status': 'complete', 252 | 'diagram': processed_diagram, 253 | 'explanation': explanation, 254 | 'mapping': component_mapping_text 255 | })}\n\n" 256 | 257 | except Exception as e: 258 | yield f"data: {json.dumps({'error': str(e)})}\n\n" 259 | 260 | return StreamingResponse( 261 | event_generator(), 262 | media_type="text/event-stream", 263 | headers={ 264 | "X-Accel-Buffering": "no", # Hint to Nginx 265 | "Cache-Control": "no-cache", 266 | "Connection": "keep-alive", 267 | }, 268 | ) 269 | except Exception as e: 270 | return {"error": str(e)} 271 | -------------------------------------------------------------------------------- /backend/app/routers/modify.py: -------------------------------------------------------------------------------- 1 | from fastapi import APIRouter, Request, HTTPException 2 | from dotenv import load_dotenv 3 | 4 | # from app.services.claude_service import ClaudeService 5 | # from app.core.limiter import limiter 6 | from anthropic._exceptions import RateLimitError 7 | from app.prompts import SYSTEM_MODIFY_PROMPT 8 | from pydantic import BaseModel 9 | from app.services.o1_mini_openai_service import OpenAIO1Service 10 | 11 | 12 | load_dotenv() 13 | 14 | router = APIRouter(prefix="/modify", tags=["Claude"]) 15 | 16 | # Initialize services 17 | # claude_service = ClaudeService() 18 | o1_service = OpenAIO1Service() 19 | 20 | 21 | # Define the request body model 22 | 23 | 24 | class ModifyRequest(BaseModel): 25 | instructions: str 26 | current_diagram: str 27 | repo: str 28 | username: str 29 | explanation: str 30 | 31 | 32 | @router.post("") 33 | # @limiter.limit("2/minute;10/day") 34 | async def modify(request: Request, body: ModifyRequest): 35 | try: 36 | # Check instructions length 37 | if not body.instructions or not body.current_diagram: 38 | return {"error": "Instructions and/or current diagram are required"} 39 | elif ( 40 | len(body.instructions) > 1000 or len(body.current_diagram) > 100000 41 | ): # just being safe 42 | return {"error": "Instructions exceed maximum length of 1000 characters"} 43 | 44 | if body.repo in [ 45 
| "fastapi", 46 | "streamlit", 47 | "flask", 48 | "api-analytics", 49 | "monkeytype", 50 | ]: 51 | return {"error": "Example repos cannot be modified"} 52 | 53 | # modified_mermaid_code = claude_service.call_claude_api( 54 | # system_prompt=SYSTEM_MODIFY_PROMPT, 55 | # data={ 56 | # "instructions": body.instructions, 57 | # "explanation": body.explanation, 58 | # "diagram": body.current_diagram, 59 | # }, 60 | # ) 61 | 62 | modified_mermaid_code = o1_service.call_o1_api( 63 | system_prompt=SYSTEM_MODIFY_PROMPT, 64 | data={ 65 | "instructions": body.instructions, 66 | "explanation": body.explanation, 67 | "diagram": body.current_diagram, 68 | }, 69 | ) 70 | 71 | # Check for BAD_INSTRUCTIONS response 72 | if "BAD_INSTRUCTIONS" in modified_mermaid_code: 73 | return {"error": "Invalid or unclear instructions provided"} 74 | 75 | return {"diagram": modified_mermaid_code} 76 | except RateLimitError as e: 77 | raise HTTPException( 78 | status_code=429, 79 | detail="Service is currently experiencing high demand. Please try again in a few minutes.", 80 | ) 81 | except Exception as e: 82 | return {"error": str(e)} 83 | -------------------------------------------------------------------------------- /backend/app/services/claude_service.py: -------------------------------------------------------------------------------- 1 | from anthropic import Anthropic 2 | from dotenv import load_dotenv 3 | from app.utils.format_message import format_user_message 4 | 5 | load_dotenv() 6 | 7 | 8 | class ClaudeService: 9 | def __init__(self): 10 | self.default_client = Anthropic() 11 | 12 | def call_claude_api( 13 | self, system_prompt: str, data: dict, api_key: str | None = None 14 | ) -> str: 15 | """ 16 | Makes an API call to Claude and returns the response. 17 | 18 | Args: 19 | system_prompt (str): The instruction/system prompt 20 | data (dict): Dictionary of variables to format into the user message 21 | api_key (str | None): Optional custom API key 22 | 23 | Returns: 24 | str: Claude's response text 25 | """ 26 | # Create the user message with the data 27 | user_message = format_user_message(data) 28 | 29 | # Use custom client if API key provided, otherwise use default 30 | client = Anthropic(api_key=api_key) if api_key else self.default_client 31 | 32 | message = client.messages.create( 33 | model="claude-3-5-sonnet-latest", 34 | max_tokens=4096, 35 | temperature=0, 36 | system=system_prompt, 37 | messages=[ 38 | {"role": "user", "content": [{"type": "text", "text": user_message}]} 39 | ], 40 | ) 41 | return message.content[0].text # type: ignore 42 | 43 | def count_tokens(self, prompt: str) -> int: 44 | """ 45 | Counts the number of tokens in a prompt. 
46 | 47 | Args: 48 | prompt (str): The prompt to count tokens for 49 | 50 | Returns: 51 | int: Number of input tokens 52 | """ 53 | response = self.default_client.messages.count_tokens( 54 | model="claude-3-5-sonnet-latest", 55 | messages=[{"role": "user", "content": prompt}], 56 | ) 57 | return response.input_tokens 58 | -------------------------------------------------------------------------------- /backend/app/services/github_service.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import jwt 3 | import time 4 | from datetime import datetime, timedelta 5 | from dotenv import load_dotenv 6 | import os 7 | 8 | load_dotenv() 9 | 10 | 11 | class GitHubService: 12 | def __init__(self, pat: str | None = None): 13 | # Try app authentication first 14 | self.client_id = os.getenv("GITHUB_CLIENT_ID") 15 | self.private_key = os.getenv("GITHUB_PRIVATE_KEY") 16 | self.installation_id = os.getenv("GITHUB_INSTALLATION_ID") 17 | 18 | # Use provided PAT if available, otherwise fallback to env PAT 19 | self.github_token = pat or os.getenv("GITHUB_PAT") 20 | 21 | # If no credentials are provided, warn about rate limits 22 | if ( 23 | not all([self.client_id, self.private_key, self.installation_id]) 24 | and not self.github_token 25 | ): 26 | print( 27 | "\033[93mWarning: No GitHub credentials provided. Using unauthenticated requests with rate limit of 60 requests/hour.\033[0m" 28 | ) 29 | 30 | self.access_token = None 31 | self.token_expires_at = None 32 | 33 | # autopep8: off 34 | def _generate_jwt(self): 35 | now = int(time.time()) 36 | payload = { 37 | "iat": now, 38 | "exp": now + (10 * 60), # 10 minutes 39 | "iss": self.client_id, 40 | } 41 | # Convert PEM string format to proper newlines 42 | return jwt.encode(payload, self.private_key, algorithm="RS256") # type: ignore 43 | 44 | # autopep8: on 45 | 46 | def _get_installation_token(self): 47 | if self.access_token and self.token_expires_at > datetime.now(): # type: ignore 48 | return self.access_token 49 | 50 | jwt_token = self._generate_jwt() 51 | response = requests.post( 52 | f"https://api.github.com/app/installations/{ 53 | self.installation_id}/access_tokens", 54 | headers={ 55 | "Authorization": f"Bearer {jwt_token}", 56 | "Accept": "application/vnd.github+json", 57 | }, 58 | ) 59 | data = response.json() 60 | self.access_token = data["token"] 61 | self.token_expires_at = datetime.now() + timedelta(hours=1) 62 | return self.access_token 63 | 64 | def _get_headers(self): 65 | # If no credentials are available, return basic headers 66 | if ( 67 | not all([self.client_id, self.private_key, self.installation_id]) 68 | and not self.github_token 69 | ): 70 | return {"Accept": "application/vnd.github+json"} 71 | 72 | # Use PAT if available 73 | if self.github_token: 74 | return { 75 | "Authorization": f"token {self.github_token}", 76 | "Accept": "application/vnd.github+json", 77 | } 78 | 79 | # Otherwise use app authentication 80 | token = self._get_installation_token() 81 | return { 82 | "Authorization": f"Bearer {token}", 83 | "Accept": "application/vnd.github+json", 84 | "X-GitHub-Api-Version": "2022-11-28", 85 | } 86 | 87 | def _check_repository_exists(self, username, repo): 88 | """ 89 | Check if the repository exists using the GitHub API. 
90 | """ 91 | api_url = f"https://api.github.com/repos/{username}/{repo}" 92 | response = requests.get(api_url, headers=self._get_headers()) 93 | 94 | if response.status_code == 404: 95 | raise ValueError("Repository not found.") 96 | elif response.status_code != 200: 97 | raise Exception( 98 | f"Failed to check repository: {response.status_code}, {response.json()}" 99 | ) 100 | 101 | def get_default_branch(self, username, repo): 102 | """Get the default branch of the repository.""" 103 | api_url = f"https://api.github.com/repos/{username}/{repo}" 104 | response = requests.get(api_url, headers=self._get_headers()) 105 | 106 | if response.status_code == 200: 107 | return response.json().get("default_branch") 108 | return None 109 | 110 | def get_github_file_paths_as_list(self, username, repo): 111 | """ 112 | Fetches the file tree of an open-source GitHub repository, 113 | excluding static files and generated code. 114 | 115 | Args: 116 | username (str): The GitHub username or organization name 117 | repo (str): The repository name 118 | 119 | Returns: 120 | str: A filtered and formatted string of file paths in the repository, one per line. 121 | """ 122 | 123 | def should_include_file(path): 124 | # Patterns to exclude 125 | excluded_patterns = [ 126 | # Dependencies 127 | "node_modules/", 128 | "vendor/", 129 | "venv/", 130 | # Compiled files 131 | ".min.", 132 | ".pyc", 133 | ".pyo", 134 | ".pyd", 135 | ".so", 136 | ".dll", 137 | ".class", 138 | # Asset files 139 | ".jpg", 140 | ".jpeg", 141 | ".png", 142 | ".gif", 143 | ".ico", 144 | ".svg", 145 | ".ttf", 146 | ".woff", 147 | ".webp", 148 | # Cache and temporary files 149 | "__pycache__/", 150 | ".cache/", 151 | ".tmp/", 152 | # Lock files and logs 153 | "yarn.lock", 154 | "poetry.lock", 155 | "*.log", 156 | # Configuration files 157 | ".vscode/", 158 | ".idea/", 159 | ] 160 | 161 | return not any(pattern in path.lower() for pattern in excluded_patterns) 162 | 163 | # Try to get the default branch first 164 | branch = self.get_default_branch(username, repo) 165 | if branch: 166 | api_url = f"https://api.github.com/repos/{ 167 | username}/{repo}/git/trees/{branch}?recursive=1" 168 | response = requests.get(api_url, headers=self._get_headers()) 169 | 170 | if response.status_code == 200: 171 | data = response.json() 172 | if "tree" in data: 173 | # Filter the paths and join them with newlines 174 | paths = [ 175 | item["path"] 176 | for item in data["tree"] 177 | if should_include_file(item["path"]) 178 | ] 179 | return "\n".join(paths) 180 | 181 | # If default branch didn't work or wasn't found, try common branch names 182 | for branch in ["main", "master"]: 183 | api_url = f"https://api.github.com/repos/{ 184 | username}/{repo}/git/trees/{branch}?recursive=1" 185 | response = requests.get(api_url, headers=self._get_headers()) 186 | 187 | if response.status_code == 200: 188 | data = response.json() 189 | if "tree" in data: 190 | # Filter the paths and join them with newlines 191 | paths = [ 192 | item["path"] 193 | for item in data["tree"] 194 | if should_include_file(item["path"]) 195 | ] 196 | return "\n".join(paths) 197 | 198 | raise ValueError( 199 | "Could not fetch repository file tree. Repository might not exist, be empty or private." 200 | ) 201 | 202 | def get_github_readme(self, username, repo): 203 | """ 204 | Fetches the README contents of an open-source GitHub repository. 
205 | 206 | Args: 207 | username (str): The GitHub username or organization name 208 | repo (str): The repository name 209 | 210 | Returns: 211 | str: The contents of the README file. 212 | 213 | Raises: 214 | ValueError: If repository does not exist or has no README. 215 | Exception: For other unexpected API errors. 216 | """ 217 | # First check if the repository exists 218 | self._check_repository_exists(username, repo) 219 | 220 | # Then attempt to fetch the README 221 | api_url = f"https://api.github.com/repos/{username}/{repo}/readme" 222 | response = requests.get(api_url, headers=self._get_headers()) 223 | 224 | if response.status_code == 404: 225 | raise ValueError("No README found for the specified repository.") 226 | elif response.status_code != 200: 227 | raise Exception( 228 | f"Failed to fetch README: { 229 | response.status_code}, {response.json()}" 230 | ) 231 | 232 | data = response.json() 233 | readme_content = requests.get(data["download_url"]).text 234 | return readme_content 235 | -------------------------------------------------------------------------------- /backend/app/services/o1_mini_openai_service.py: -------------------------------------------------------------------------------- 1 | from openai import OpenAI 2 | from dotenv import load_dotenv 3 | from app.utils.format_message import format_user_message 4 | import tiktoken 5 | import os 6 | import aiohttp 7 | import json 8 | from typing import AsyncGenerator 9 | 10 | load_dotenv() 11 | 12 | 13 | class OpenAIO1Service: 14 | def __init__(self): 15 | self.default_client = OpenAI( 16 | api_key=os.getenv("OPENAI_API_KEY"), 17 | ) 18 | self.encoding = tiktoken.get_encoding("o200k_base") # Encoder for OpenAI models 19 | self.base_url = "https://api.openai.com/v1/chat/completions" 20 | 21 | def call_o1_api( 22 | self, 23 | system_prompt: str, 24 | data: dict, 25 | api_key: str | None = None, 26 | ) -> str: 27 | """ 28 | Makes an API call to OpenAI o1-mini and returns the response. 29 | 30 | Args: 31 | system_prompt (str): The instruction/system prompt 32 | data (dict): Dictionary of variables to format into the user message 33 | api_key (str | None): Optional custom API key 34 | 35 | Returns: 36 | str: o1-mini's response text 37 | """ 38 | # Create the user message with the data 39 | user_message = format_user_message(data) 40 | 41 | # Use custom client if API key provided, otherwise use default 42 | client = OpenAI(api_key=api_key) if api_key else self.default_client 43 | 44 | try: 45 | print( 46 | f"Making non-streaming API call to o1-mini with API key: {'custom key' if api_key else 'default key'}" 47 | ) 48 | 49 | completion = client.chat.completions.create( 50 | model="o1-mini", 51 | messages=[ 52 | {"role": "system", "content": system_prompt}, 53 | {"role": "user", "content": user_message}, 54 | ], 55 | max_completion_tokens=12000, # Adjust as needed 56 | temperature=0.2, 57 | ) 58 | 59 | print("API call completed successfully") 60 | 61 | if completion.choices[0].message.content is None: 62 | raise ValueError("No content returned from OpenAI o1-mini") 63 | 64 | return completion.choices[0].message.content 65 | 66 | except Exception as e: 67 | print(f"Error in OpenAI o1-mini API call: {str(e)}") 68 | raise 69 | 70 | async def call_o1_api_stream( 71 | self, 72 | system_prompt: str, 73 | data: dict, 74 | api_key: str | None = None, 75 | ) -> AsyncGenerator[str, None]: 76 | """ 77 | Makes a streaming API call to OpenAI o1-mini and yields the responses. 
78 | 79 | Args: 80 | system_prompt (str): The instruction/system prompt 81 | data (dict): Dictionary of variables to format into the user message 82 | api_key (str | None): Optional custom API key 83 | 84 | Yields: 85 | str: Chunks of o1-mini's response text 86 | """ 87 | # Create the user message with the data 88 | user_message = format_user_message(data) 89 | 90 | headers = { 91 | "Content-Type": "application/json", 92 | "Authorization": f"Bearer {api_key or self.default_client.api_key}", 93 | } 94 | 95 | payload = { 96 | "model": "o1-mini", 97 | "messages": [ 98 | { 99 | "role": "user", 100 | "content": f""" 101 | 102 | {system_prompt} 103 | 104 | 105 | {user_message} 106 | 107 | """, 108 | }, 109 | ], 110 | "max_completion_tokens": 12000, 111 | "stream": True, 112 | } 113 | 114 | try: 115 | async with aiohttp.ClientSession() as session: 116 | async with session.post( 117 | self.base_url, headers=headers, json=payload 118 | ) as response: 119 | 120 | if response.status != 200: 121 | error_text = await response.text() 122 | print(f"Error response: {error_text}") 123 | raise ValueError( 124 | f"OpenAI API returned status code {response.status}: {error_text}" 125 | ) 126 | 127 | line_count = 0 128 | async for line in response.content: 129 | line = line.decode("utf-8").strip() 130 | if not line: 131 | continue 132 | 133 | line_count += 1 134 | 135 | if line.startswith("data: "): 136 | if line == "data: [DONE]": 137 | break 138 | try: 139 | data = json.loads(line[6:]) 140 | content = ( 141 | data.get("choices", [{}])[0] 142 | .get("delta", {}) 143 | .get("content") 144 | ) 145 | if content: 146 | yield content 147 | except json.JSONDecodeError as e: 148 | print(f"JSON decode error: {e} for line: {line}") 149 | continue 150 | 151 | if line_count == 0: 152 | print("Warning: No lines received in stream response") 153 | 154 | except aiohttp.ClientError as e: 155 | print(f"Connection error: {str(e)}") 156 | raise ValueError(f"Failed to connect to OpenAI API: {str(e)}") 157 | except Exception as e: 158 | print(f"Unexpected error in streaming API call: {str(e)}") 159 | raise 160 | 161 | def count_tokens(self, prompt: str) -> int: 162 | """ 163 | Counts the number of tokens in a prompt. 164 | 165 | Args: 166 | prompt (str): The prompt to count tokens for 167 | 168 | Returns: 169 | int: Estimated number of input tokens 170 | """ 171 | num_tokens = len(self.encoding.encode(prompt)) 172 | return num_tokens 173 | -------------------------------------------------------------------------------- /backend/app/services/o3_mini_openai_service.py: -------------------------------------------------------------------------------- 1 | from openai import OpenAI 2 | from dotenv import load_dotenv 3 | from app.utils.format_message import format_user_message 4 | import tiktoken 5 | import os 6 | import aiohttp 7 | import json 8 | from typing import AsyncGenerator, Literal 9 | 10 | load_dotenv() 11 | 12 | 13 | class OpenAIo3Service: 14 | def __init__(self): 15 | self.default_client = OpenAI( 16 | api_key=os.getenv("OPENAI_API_KEY"), 17 | ) 18 | self.encoding = tiktoken.get_encoding("o200k_base") # Encoder for OpenAI models 19 | self.base_url = "https://api.openai.com/v1/chat/completions" 20 | 21 | def call_o3_api( 22 | self, 23 | system_prompt: str, 24 | data: dict, 25 | api_key: str | None = None, 26 | reasoning_effort: Literal["low", "medium", "high"] = "low", 27 | ) -> str: 28 | """ 29 | Makes an API call to OpenAI o3-mini and returns the response. 
30 | 31 | Args: 32 | system_prompt (str): The instruction/system prompt 33 | data (dict): Dictionary of variables to format into the user message 34 | api_key (str | None): Optional custom API key 35 | 36 | Returns: 37 | str: o3-mini's response text 38 | """ 39 | # Create the user message with the data 40 | user_message = format_user_message(data) 41 | 42 | # Use custom client if API key provided, otherwise use default 43 | client = OpenAI(api_key=api_key) if api_key else self.default_client 44 | 45 | try: 46 | print( 47 | f"Making non-streaming API call to o3-mini with API key: {'custom key' if api_key else 'default key'}" 48 | ) 49 | 50 | completion = client.chat.completions.create( 51 | model="o3-mini", 52 | messages=[ 53 | {"role": "system", "content": system_prompt}, 54 | {"role": "user", "content": user_message}, 55 | ], 56 | max_completion_tokens=12000, # Adjust as needed 57 | temperature=0.2, 58 | reasoning_effort=reasoning_effort, 59 | ) 60 | 61 | print("API call completed successfully") 62 | 63 | if completion.choices[0].message.content is None: 64 | raise ValueError("No content returned from OpenAI o3-mini") 65 | 66 | return completion.choices[0].message.content 67 | 68 | except Exception as e: 69 | print(f"Error in OpenAI o3-mini API call: {str(e)}") 70 | raise 71 | 72 | async def call_o3_api_stream( 73 | self, 74 | system_prompt: str, 75 | data: dict, 76 | api_key: str | None = None, 77 | reasoning_effort: Literal["low", "medium", "high"] = "low", 78 | ) -> AsyncGenerator[str, None]: 79 | """ 80 | Makes a streaming API call to OpenAI o3-mini and yields the responses. 81 | 82 | Args: 83 | system_prompt (str): The instruction/system prompt 84 | data (dict): Dictionary of variables to format into the user message 85 | api_key (str | None): Optional custom API key 86 | 87 | Yields: 88 | str: Chunks of o3-mini's response text 89 | """ 90 | # Create the user message with the data 91 | user_message = format_user_message(data) 92 | 93 | headers = { 94 | "Content-Type": "application/json", 95 | "Authorization": f"Bearer {api_key or self.default_client.api_key}", 96 | } 97 | 98 | # payload = { 99 | # "model": "o3-mini", 100 | # "messages": [ 101 | # { 102 | # "role": "user", 103 | # "content": f""" 104 | # 105 | # {system_prompt} 106 | # 107 | # 108 | # {user_message} 109 | # 110 | # """, 111 | # }, 112 | # ], 113 | # "max_completion_tokens": 12000, 114 | # "stream": True, 115 | # } 116 | 117 | payload = { 118 | "model": "o3-mini", 119 | "messages": [ 120 | {"role": "system", "content": system_prompt}, 121 | {"role": "user", "content": user_message}, 122 | ], 123 | "max_completion_tokens": 12000, 124 | "stream": True, 125 | "reasoning_effort": reasoning_effort, 126 | } 127 | 128 | try: 129 | async with aiohttp.ClientSession() as session: 130 | async with session.post( 131 | self.base_url, headers=headers, json=payload 132 | ) as response: 133 | 134 | if response.status != 200: 135 | error_text = await response.text() 136 | print(f"Error response: {error_text}") 137 | raise ValueError( 138 | f"OpenAI API returned status code {response.status}: {error_text}" 139 | ) 140 | 141 | line_count = 0 142 | async for line in response.content: 143 | line = line.decode("utf-8").strip() 144 | if not line: 145 | continue 146 | 147 | line_count += 1 148 | 149 | if line.startswith("data: "): 150 | if line == "data: [DONE]": 151 | break 152 | try: 153 | data = json.loads(line[6:]) 154 | content = ( 155 | data.get("choices", [{}])[0] 156 | .get("delta", {}) 157 | .get("content") 158 | ) 159 | if content: 
160 | yield content 161 | except json.JSONDecodeError as e: 162 | print(f"JSON decode error: {e} for line: {line}") 163 | continue 164 | 165 | if line_count == 0: 166 | print("Warning: No lines received in stream response") 167 | 168 | except aiohttp.ClientError as e: 169 | print(f"Connection error: {str(e)}") 170 | raise ValueError(f"Failed to connect to OpenAI API: {str(e)}") 171 | except Exception as e: 172 | print(f"Unexpected error in streaming API call: {str(e)}") 173 | raise 174 | 175 | def count_tokens(self, prompt: str) -> int: 176 | """ 177 | Counts the number of tokens in a prompt. 178 | 179 | Args: 180 | prompt (str): The prompt to count tokens for 181 | 182 | Returns: 183 | int: Estimated number of input tokens 184 | """ 185 | num_tokens = len(self.encoding.encode(prompt)) 186 | return num_tokens 187 | -------------------------------------------------------------------------------- /backend/app/services/o3_mini_openrouter_service.py: -------------------------------------------------------------------------------- 1 | from openai import OpenAI 2 | from dotenv import load_dotenv 3 | from app.utils.format_message import format_user_message 4 | import tiktoken 5 | import os 6 | import aiohttp 7 | import json 8 | from typing import Literal, AsyncGenerator 9 | 10 | load_dotenv() 11 | 12 | 13 | class OpenRouterO3Service: 14 | def __init__(self): 15 | self.default_client = OpenAI( 16 | base_url="https://openrouter.ai/api/v1", 17 | api_key=os.getenv("OPENROUTER_API_KEY"), 18 | ) 19 | self.encoding = tiktoken.get_encoding("o200k_base") 20 | self.base_url = "https://openrouter.ai/api/v1/chat/completions" 21 | 22 | def call_o3_api( 23 | self, 24 | system_prompt: str, 25 | data: dict, 26 | api_key: str | None = None, 27 | reasoning_effort: Literal["low", "medium", "high"] = "low", 28 | ) -> str: 29 | """ 30 | Makes an API call to OpenRouter O3 and returns the response. 31 | 32 | Args: 33 | system_prompt (str): The instruction/system prompt 34 | data (dict): Dictionary of variables to format into the user message 35 | api_key (str | None): Optional custom API key 36 | 37 | Returns: 38 | str: O3's response text 39 | """ 40 | # Create the user message with the data 41 | user_message = format_user_message(data) 42 | 43 | # Use custom client if API key provided, otherwise use default 44 | client = ( 45 | OpenAI(base_url="https://openrouter.ai/api/v1", api_key=api_key) 46 | if api_key 47 | else self.default_client 48 | ) 49 | 50 | completion = client.chat.completions.create( 51 | extra_headers={ 52 | "HTTP-Referer": "https://gitdiagram.com", # Optional. Site URL for rankings on openrouter.ai. 53 | "X-Title": "gitdiagram", # Optional. Site title for rankings on openrouter.ai. 54 | }, 55 | model="openai/o3-mini", # Can be configured as needed 56 | reasoning_effort=reasoning_effort, # Can be adjusted based on needs 57 | messages=[ 58 | {"role": "system", "content": system_prompt}, 59 | {"role": "user", "content": user_message}, 60 | ], 61 | max_completion_tokens=12000, # Adjust as needed 62 | temperature=0.2, 63 | ) 64 | 65 | if completion.choices[0].message.content is None: 66 | raise ValueError("No content returned from OpenRouter O3") 67 | 68 | return completion.choices[0].message.content 69 | 70 | async def call_o3_api_stream( 71 | self, 72 | system_prompt: str, 73 | data: dict, 74 | api_key: str | None = None, 75 | reasoning_effort: Literal["low", "medium", "high"] = "low", 76 | ) -> AsyncGenerator[str, None]: 77 | """ 78 | Makes a streaming API call to OpenRouter O3 and yields the responses. 
79 | 80 | Args: 81 | system_prompt (str): The instruction/system prompt 82 | data (dict): Dictionary of variables to format into the user message 83 | api_key (str | None): Optional custom API key 84 | 85 | Yields: 86 | str: Chunks of O3's response text 87 | """ 88 | # Create the user message with the data 89 | user_message = format_user_message(data) 90 | 91 | headers = { 92 | "HTTP-Referer": "https://gitdiagram.com", 93 | "X-Title": "gitdiagram", 94 | "Authorization": f"Bearer {api_key or self.default_client.api_key}", 95 | "Content-Type": "application/json", 96 | } 97 | 98 | payload = { 99 | "model": "openai/o3-mini", 100 | "messages": [ 101 | {"role": "system", "content": system_prompt}, 102 | {"role": "user", "content": user_message}, 103 | ], 104 | "max_tokens": 12000, 105 | "temperature": 0.2, 106 | "stream": True, 107 | "reasoning_effort": reasoning_effort, 108 | } 109 | 110 | buffer = "" 111 | async with aiohttp.ClientSession() as session: 112 | async with session.post( 113 | self.base_url, headers=headers, json=payload 114 | ) as response: 115 | async for line in response.content: 116 | line = line.decode("utf-8").strip() 117 | if line.startswith("data: "): 118 | if line == "data: [DONE]": 119 | break 120 | try: 121 | data = json.loads(line[6:]) 122 | if ( 123 | content := data.get("choices", [{}])[0] 124 | .get("delta", {}) 125 | .get("content") 126 | ): 127 | yield content 128 | except json.JSONDecodeError: 129 | # Skip any non-JSON lines (like the OPENROUTER PROCESSING comments) 130 | continue 131 | 132 | def count_tokens(self, prompt: str) -> int: 133 | """ 134 | Counts the number of tokens in a prompt. 135 | Note: This is a rough estimate as OpenRouter may not provide direct token counting. 136 | 137 | Args: 138 | prompt (str): The prompt to count tokens for 139 | 140 | Returns: 141 | int: Estimated number of input tokens 142 | """ 143 | num_tokens = len(self.encoding.encode(prompt)) 144 | return num_tokens 145 | -------------------------------------------------------------------------------- /backend/app/services/o4_mini_openai_service.py: -------------------------------------------------------------------------------- 1 | from openai import OpenAI 2 | from dotenv import load_dotenv 3 | from app.utils.format_message import format_user_message 4 | import tiktoken 5 | import os 6 | import aiohttp 7 | import json 8 | from typing import AsyncGenerator, Literal 9 | 10 | load_dotenv() 11 | 12 | 13 | class OpenAIo4Service: 14 | def __init__(self): 15 | self.default_client = OpenAI( 16 | api_key=os.getenv("OPENAI_API_KEY"), 17 | ) 18 | self.encoding = tiktoken.get_encoding("o200k_base") # Encoder for OpenAI models 19 | self.base_url = "https://api.openai.com/v1/chat/completions" 20 | 21 | def call_o4_api( 22 | self, 23 | system_prompt: str, 24 | data: dict, 25 | api_key: str | None = None, 26 | reasoning_effort: Literal["low", "medium", "high"] = "low", 27 | ) -> str: 28 | """ 29 | Makes an API call to OpenAI o4-mini and returns the response. 
30 | 31 | Args: 32 | system_prompt (str): The instruction/system prompt 33 | data (dict): Dictionary of variables to format into the user message 34 | api_key (str | None): Optional custom API key 35 | 36 | Returns: 37 | str: o4-mini's response text 38 | """ 39 | # Create the user message with the data 40 | user_message = format_user_message(data) 41 | 42 | # Use custom client if API key provided, otherwise use default 43 | client = OpenAI(api_key=api_key) if api_key else self.default_client 44 | 45 | try: 46 | print( 47 | f"Making non-streaming API call to o4-mini with API key: {'custom key' if api_key else 'default key'}" 48 | ) 49 | 50 | completion = client.chat.completions.create( 51 | model="o4-mini", 52 | messages=[ 53 | {"role": "system", "content": system_prompt}, 54 | {"role": "user", "content": user_message}, 55 | ], 56 | max_completion_tokens=12000, # Adjust as needed 57 | temperature=0.2, 58 | reasoning_effort=reasoning_effort, 59 | ) 60 | 61 | print("API call completed successfully") 62 | 63 | if completion.choices[0].message.content is None: 64 | raise ValueError("No content returned from OpenAI o4-mini") 65 | 66 | return completion.choices[0].message.content 67 | 68 | except Exception as e: 69 | print(f"Error in OpenAI o4-mini API call: {str(e)}") 70 | raise 71 | 72 | async def call_o4_api_stream( 73 | self, 74 | system_prompt: str, 75 | data: dict, 76 | api_key: str | None = None, 77 | reasoning_effort: Literal["low", "medium", "high"] = "low", 78 | ) -> AsyncGenerator[str, None]: 79 | """ 80 | Makes a streaming API call to OpenAI o4-mini and yields the responses. 81 | 82 | Args: 83 | system_prompt (str): The instruction/system prompt 84 | data (dict): Dictionary of variables to format into the user message 85 | api_key (str | None): Optional custom API key 86 | 87 | Yields: 88 | str: Chunks of o4-mini's response text 89 | """ 90 | # Create the user message with the data 91 | user_message = format_user_message(data) 92 | 93 | headers = { 94 | "Content-Type": "application/json", 95 | "Authorization": f"Bearer {api_key or self.default_client.api_key}", 96 | } 97 | 98 | # payload = { 99 | # "model": "o3-mini", 100 | # "messages": [ 101 | # { 102 | # "role": "user", 103 | # "content": f""" 104 | # 105 | # {system_prompt} 106 | # 107 | # 108 | # {user_message} 109 | # 110 | # """, 111 | # }, 112 | # ], 113 | # "max_completion_tokens": 12000, 114 | # "stream": True, 115 | # } 116 | 117 | payload = { 118 | "model": "o4-mini", 119 | "messages": [ 120 | {"role": "system", "content": system_prompt}, 121 | {"role": "user", "content": user_message}, 122 | ], 123 | "max_completion_tokens": 12000, 124 | "stream": True, 125 | "reasoning_effort": reasoning_effort, 126 | } 127 | 128 | try: 129 | async with aiohttp.ClientSession() as session: 130 | async with session.post( 131 | self.base_url, headers=headers, json=payload 132 | ) as response: 133 | 134 | if response.status != 200: 135 | error_text = await response.text() 136 | print(f"Error response: {error_text}") 137 | raise ValueError( 138 | f"OpenAI API returned status code {response.status}: {error_text}" 139 | ) 140 | 141 | line_count = 0 142 | async for line in response.content: 143 | line = line.decode("utf-8").strip() 144 | if not line: 145 | continue 146 | 147 | line_count += 1 148 | 149 | if line.startswith("data: "): 150 | if line == "data: [DONE]": 151 | break 152 | try: 153 | data = json.loads(line[6:]) 154 | content = ( 155 | data.get("choices", [{}])[0] 156 | .get("delta", {}) 157 | .get("content") 158 | ) 159 | if content: 
160 | yield content 161 | except json.JSONDecodeError as e: 162 | print(f"JSON decode error: {e} for line: {line}") 163 | continue 164 | 165 | if line_count == 0: 166 | print("Warning: No lines received in stream response") 167 | 168 | except aiohttp.ClientError as e: 169 | print(f"Connection error: {str(e)}") 170 | raise ValueError(f"Failed to connect to OpenAI API: {str(e)}") 171 | except Exception as e: 172 | print(f"Unexpected error in streaming API call: {str(e)}") 173 | raise 174 | 175 | def count_tokens(self, prompt: str) -> int: 176 | """ 177 | Counts the number of tokens in a prompt. 178 | 179 | Args: 180 | prompt (str): The prompt to count tokens for 181 | 182 | Returns: 183 | int: Estimated number of input tokens 184 | """ 185 | num_tokens = len(self.encoding.encode(prompt)) 186 | return num_tokens 187 | -------------------------------------------------------------------------------- /backend/app/utils/format_message.py: -------------------------------------------------------------------------------- 1 | def format_user_message(data: dict[str, str]) -> str: 2 | """ 3 | Formats a dictionary of data into a structured user message with XML-style tags. 4 | 5 | Args: 6 | data (dict[str, str]): Dictionary of key-value pairs to format 7 | 8 | Returns: 9 | str: Formatted message with each key-value pair wrapped in appropriate tags 10 | """ 11 | parts = [] 12 | for key, value in data.items(): 13 | # Map keys to their XML-style tags 14 | if key == "file_tree": 15 | parts.append(f"<file_tree>\n{value}\n</file_tree>") 16 | elif key == "readme": 17 | parts.append(f"<readme>\n{value}\n</readme>") 18 | elif key == "explanation": 19 | parts.append(f"<explanation>\n{value}\n</explanation>") 20 | elif key == "component_mapping": 21 | parts.append(f"<component_mapping>\n{value}\n</component_mapping>") 22 | elif key == "instructions": 23 | parts.append(f"<instructions>\n{value}\n</instructions>") 24 | elif key == "diagram": 25 | parts.append(f"<diagram>\n{value}\n</diagram>") 26 | 27 | return "\n\n".join(parts) 28 | -------------------------------------------------------------------------------- /backend/deploy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Exit on any error 4 | set -e 5 | 6 | # Navigate to project directory 7 | cd ~/gitdiagram 8 | 9 | # Pull latest changes 10 | git pull origin main 11 | 12 | # Build and restart containers with production environment 13 | docker-compose down 14 | ENVIRONMENT=production docker-compose up --build -d 15 | 16 | # Remove unused images 17 | docker image prune -f 18 | 19 | # Show logs only if --logs flag is passed 20 | if [ "$1" == "--logs" ]; then 21 | docker-compose logs -f 22 | else 23 | echo "Deployment complete! Run 'docker-compose logs -f' to view logs" 24 | fi -------------------------------------------------------------------------------- /backend/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "Current ENVIRONMENT: $ENVIRONMENT" 4 | 5 | if [ "$ENVIRONMENT" = "development" ]; then 6 | echo "Starting in development mode with hot reload..." 7 | exec uvicorn app.main:app --host 0.0.0.0 --port 8000 --reload 8 | elif [ "$ENVIRONMENT" = "production" ]; then 9 | echo "Starting in production mode with multiple workers..."
10 | exec uvicorn app.main:app \ 11 | --host 0.0.0.0 \ 12 | --port 8000 \ 13 | --timeout-keep-alive 300 \ 14 | --workers 2 \ 15 | --loop uvloop \ 16 | --http httptools 17 | else 18 | echo "ENVIRONMENT must be set to either 'development' or 'production'" 19 | exit 1 20 | fi -------------------------------------------------------------------------------- /backend/nginx/api.conf: -------------------------------------------------------------------------------- 1 | server { 2 | server_name api.gitdiagram.com; 3 | 4 | # Block requests with no valid Host header 5 | if ($host !~ ^(api.gitdiagram.com)$) { 6 | return 444; 7 | } 8 | 9 | # Strictly allow only GET, POST, and OPTIONS requests for the specified paths (defined in my fastapi app) 10 | location ~ ^/(generate(/cost|/stream)?|modify|)?$ { 11 | if ($request_method !~ ^(GET|POST|OPTIONS)$) { 12 | return 444; 13 | } 14 | 15 | proxy_pass http://127.0.0.1:8000; 16 | include proxy_params; 17 | proxy_redirect off; 18 | 19 | # Disable buffering for SSE 20 | proxy_buffering off; 21 | proxy_cache off; 22 | 23 | # Required headers for SSE 24 | proxy_set_header Connection ''; 25 | proxy_http_version 1.1; 26 | } 27 | 28 | # Return 444 for everything else (no response, just close connection) 29 | location / { 30 | return 444; 31 | # keep access log on 32 | } 33 | 34 | # Add timeout settings 35 | proxy_connect_timeout 300; 36 | proxy_send_timeout 300; 37 | proxy_read_timeout 300; 38 | send_timeout 300; 39 | 40 | listen 443 ssl; # managed by Certbot 41 | ssl_certificate /etc/letsencrypt/live/api.gitdiagram.com/fullchain.pem; # managed by Certbot 42 | ssl_certificate_key /etc/letsencrypt/live/api.gitdiagram.com/privkey.pem; # managed by Certbot 43 | include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot 44 | ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot 45 | 46 | } 47 | server { 48 | if ($host = api.gitdiagram.com) { 49 | return 301 https://$host$request_uri; 50 | } # managed by Certbot 51 | 52 | 53 | listen 80; 54 | server_name api.gitdiagram.com; 55 | return 404; # managed by Certbot 56 | } -------------------------------------------------------------------------------- /backend/nginx/setup_nginx.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Exit on any error 4 | set -e 5 | 6 | # Check if running as root 7 | if [ "$EUID" -ne 0 ]; then 8 | echo "Please run as root or with sudo" 9 | exit 1 10 | fi 11 | 12 | # Copy Nginx configuration 13 | echo "Copying Nginx configuration..." 14 | cp "$(dirname "$0")/api.conf" /etc/nginx/sites-available/api 15 | ln -sf /etc/nginx/sites-available/api /etc/nginx/sites-enabled/ 16 | 17 | # Test Nginx configuration 18 | echo "Testing Nginx configuration..." 19 | nginx -t 20 | 21 | # Reload Nginx 22 | echo "Reloading Nginx..." 23 | systemctl reload nginx 24 | 25 | echo "Nginx configuration updated successfully!" 
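The nginx configuration above disables proxy buffering and sets the Connection header and HTTP version specifically so the proxied /generate/stream route can deliver Server-Sent-Events output as the backend produces it. Below is a minimal TypeScript sketch of a client consuming such a stream -- an illustration only, not a file from this repository: the JSON body fields are assumptions, and it assumes the backend relays "data: "-prefixed lines ending with "data: [DONE]", mirroring the upstream OpenAI stream format parsed by the services earlier in this listing.

// Illustrative sketch only -- not part of the repository.
// The endpoint path matches the nginx location block above; the payload shape is an assumption.
export async function streamDiagram(
  username: string,
  repo: string,
  onChunk: (text: string) => void,
): Promise<void> {
  const response = await fetch("https://api.gitdiagram.com/generate/stream", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ username, repo }), // assumed request body
  });
  if (!response.ok || !response.body) {
    throw new Error(`Stream request failed with status ${response.status}`);
  }
  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let buffered = "";
  for (;;) {
    const { done, value } = await reader.read();
    if (done) break;
    buffered += decoder.decode(value, { stream: true });
    const lines = buffered.split("\n");
    buffered = lines.pop() ?? ""; // keep any partial line for the next chunk
    for (const line of lines) {
      const trimmed = line.trim();
      if (!trimmed.startsWith("data: ")) continue;
      const payload = trimmed.slice(6);
      if (payload === "[DONE]") return;
      onChunk(payload);
    }
  }
}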
-------------------------------------------------------------------------------- /backend/requirements.txt: -------------------------------------------------------------------------------- 1 | aiohappyeyeballs==2.4.6 2 | aiohttp==3.11.12 3 | aiosignal==1.3.2 4 | annotated-types==0.7.0 5 | anthropic==0.42.0 6 | anyio==4.7.0 7 | api-analytics==1.2.5 8 | attrs==25.1.0 9 | certifi==2024.12.14 10 | cffi==1.17.1 11 | charset-normalizer==3.4.0 12 | click==8.1.7 13 | cryptography==44.0.0 14 | Deprecated==1.2.15 15 | distro==1.9.0 16 | dnspython==2.7.0 17 | email_validator==2.2.0 18 | fastapi==0.115.6 19 | fastapi-cli==0.0.6 20 | frozenlist==1.5.0 21 | h11==0.14.0 22 | httpcore==1.0.7 23 | httptools==0.6.4 24 | httpx==0.28.1 25 | idna==3.10 26 | Jinja2==3.1.4 27 | jiter==0.8.2 28 | limits==3.14.1 29 | markdown-it-py==3.0.0 30 | MarkupSafe==3.0.2 31 | mdurl==0.1.2 32 | multidict==6.1.0 33 | openai==1.61.1 34 | packaging==24.2 35 | propcache==0.2.1 36 | pycparser==2.22 37 | pydantic==2.10.3 38 | pydantic_core==2.27.1 39 | Pygments==2.18.0 40 | PyJWT==2.10.1 41 | python-dotenv==1.0.1 42 | python-multipart==0.0.19 43 | PyYAML==6.0.2 44 | regex==2024.11.6 45 | requests==2.32.3 46 | rich==13.9.4 47 | rich-toolkit==0.12.0 48 | shellingham==1.5.4 49 | slowapi==0.1.9 50 | sniffio==1.3.1 51 | starlette==0.41.3 52 | tiktoken==0.8.0 53 | tqdm==4.67.1 54 | typer==0.15.1 55 | typing_extensions==4.12.2 56 | urllib3==2.2.3 57 | uvicorn==0.34.0 58 | uvloop==0.21.0 59 | watchfiles==1.0.3 60 | websockets==14.1 61 | wrapt==1.17.0 62 | yarl==1.18.3 63 | -------------------------------------------------------------------------------- /components.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://ui.shadcn.com/schema.json", 3 | "style": "default", 4 | "rsc": true, 5 | "tsx": true, 6 | "tailwind": { 7 | "config": "tailwind.config.ts", 8 | "css": "src/styles/globals.css", 9 | "baseColor": "neutral", 10 | "cssVariables": true, 11 | "prefix": "" 12 | }, 13 | "aliases": { 14 | "components": "~/components", 15 | "utils": "~/lib/utils", 16 | "ui": "~/components/ui", 17 | "lib": "~/lib", 18 | "hooks": "~/hooks" 19 | }, 20 | "iconLibrary": "lucide" 21 | } -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | api: 3 | build: 4 | context: ./backend 5 | dockerfile: Dockerfile 6 | ports: 7 | - "8000:8000" 8 | volumes: 9 | - ./backend:/app 10 | env_file: 11 | - .env 12 | environment: 13 | - ENVIRONMENT=${ENVIRONMENT:-development} # Default to development if not set 14 | restart: unless-stopped 15 | -------------------------------------------------------------------------------- /docs/readme_img.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ahmedkhaleel2004/gitdiagram/9236492a681b221aff38e3dba9752af83d7823aa/docs/readme_img.png -------------------------------------------------------------------------------- /drizzle.config.ts: -------------------------------------------------------------------------------- 1 | import { type Config } from "drizzle-kit"; 2 | 3 | import { env } from "~/env"; 4 | 5 | export default { 6 | schema: "./src/server/db/schema.ts", 7 | dialect: "postgresql", 8 | dbCredentials: { 9 | url: env.POSTGRES_URL, 10 | }, 11 | tablesFilter: ["gitdiagram_*"], 12 | } satisfies Config; 13 | 
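The tablesFilter of "gitdiagram_*" in drizzle.config.ts above scopes drizzle-kit to tables carrying the project prefix. The sketch below shows the schema-side convention such a filter normally pairs with; it is not the repository's actual src/server/db/schema.ts -- the column names are inferred from the cache server action that appears later in this listing, and the exact types and constraints are assumptions.

// Illustrative sketch only -- not the repository's actual schema.ts.
// Column names follow src/app/_actions/cache.ts; types and constraints are assumptions.
import { boolean, pgTableCreator, text, timestamp } from "drizzle-orm/pg-core";

// Every table created through this helper is named "gitdiagram_<name>",
// which is what the "gitdiagram_*" tablesFilter above matches.
const createTable = pgTableCreator((name) => `gitdiagram_${name}`);

export const diagramCache = createTable("diagram_cache", {
  username: text("username").notNull(),
  repo: text("repo").notNull(),
  diagram: text("diagram").notNull(),
  explanation: text("explanation").notNull(),
  usedOwnKey: boolean("used_own_key").default(false),
  updatedAt: timestamp("updated_at").defaultNow().notNull(),
});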
-------------------------------------------------------------------------------- /next.config.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Run `build` or `dev` with `SKIP_ENV_VALIDATION` to skip env validation. This is especially useful 3 | * for Docker builds. 4 | */ 5 | import "./src/env.js"; 6 | 7 | /** @type {import("next").NextConfig} */ 8 | const config = { 9 | reactStrictMode: false, 10 | async rewrites() { 11 | return [ 12 | { 13 | source: "/ingest/static/:path*", 14 | destination: "https://us-assets.i.posthog.com/static/:path*", 15 | }, 16 | { 17 | source: "/ingest/:path*", 18 | destination: "https://us.i.posthog.com/:path*", 19 | }, 20 | { 21 | source: "/ingest/decide", 22 | destination: "https://us.i.posthog.com/decide", 23 | }, 24 | ]; 25 | }, 26 | // This is required to support PostHog trailing slash API requests 27 | skipTrailingSlashRedirect: true, 28 | }; 29 | 30 | export default config; 31 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "gitdiagram", 3 | "version": "0.1.0", 4 | "private": true, 5 | "type": "module", 6 | "scripts": { 7 | "build": "next build", 8 | "check": "next lint && tsc --noEmit", 9 | "db:generate": "drizzle-kit generate", 10 | "db:migrate": "drizzle-kit migrate", 11 | "db:push": "drizzle-kit push", 12 | "db:studio": "drizzle-kit studio", 13 | "dev": "next dev --turbo", 14 | "lint": "next lint", 15 | "lint:fix": "next lint --fix", 16 | "preview": "next build && next start", 17 | "start": "next start", 18 | "typecheck": "tsc --noEmit", 19 | "format:write": "prettier --write \"**/*.{ts,tsx,js,jsx,mdx}\" --cache", 20 | "format:check": "prettier --check \"**/*.{ts,tsx,js,jsx,mdx}\" --cache" 21 | }, 22 | "dependencies": { 23 | "@neondatabase/serverless": "^0.10.4", 24 | "@radix-ui/react-dialog": "^1.1.4", 25 | "@radix-ui/react-progress": "^1.1.1", 26 | "@radix-ui/react-slot": "^1.1.1", 27 | "@radix-ui/react-switch": "^1.1.3", 28 | "@radix-ui/react-tooltip": "^1.1.6", 29 | "@t3-oss/env-nextjs": "^0.10.1", 30 | "class-variance-authority": "^0.7.1", 31 | "clsx": "^2.1.1", 32 | "dotenv": "^16.4.7", 33 | "drizzle-orm": "^0.33.0", 34 | "geist": "^1.3.0", 35 | "ldrs": "^1.0.2", 36 | "lucide-react": "^0.468.0", 37 | "mermaid": "^11.4.1", 38 | "next": "^15.0.1", 39 | "next-themes": "^0.4.6", 40 | "postgres": "^3.4.4", 41 | "posthog-js": "^1.203.1", 42 | "react": "^18.3.1", 43 | "react-dom": "^18.3.1", 44 | "react-icons": "^5.4.0", 45 | "sonner": "^2.0.3", 46 | "svg-pan-zoom": "^3.6.2", 47 | "tailwind-merge": "^2.5.5", 48 | "tailwindcss-animate": "^1.0.7", 49 | "zod": "^3.23.3" 50 | }, 51 | "devDependencies": { 52 | "@types/eslint": "^8.56.10", 53 | "@types/node": "^20.14.10", 54 | "@types/react": "^18.3.3", 55 | "@types/react-dom": "^18.3.0", 56 | "@types/svg-pan-zoom": "^3.4.0", 57 | "@typescript-eslint/eslint-plugin": "^8.1.0", 58 | "@typescript-eslint/parser": "^8.1.0", 59 | "drizzle-kit": "^0.24.0", 60 | "eslint": "^8.57.0", 61 | "eslint-config-next": "^15.0.1", 62 | "eslint-plugin-drizzle": "^0.2.3", 63 | "postcss": "^8.4.39", 64 | "prettier": "^3.3.2", 65 | "prettier-plugin-tailwindcss": "^0.6.5", 66 | "tailwind-scrollbar": "^4.0.0", 67 | "tailwindcss": "^3.4.3", 68 | "typescript": "^5.5.3" 69 | }, 70 | "ct3aMetadata": { 71 | "initVersion": "7.38.1" 72 | }, 73 | "packageManager": "pnpm@9.13.0" 74 | } 75 | 
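package.json above pins posthog-js, and the rewrites in next.config.js route /ingest traffic to PostHog so analytics requests stay on the app's own origin. The sketch below shows how such a client is typically initialized against that proxy path; the repository's real setup lives in src/app/providers.tsx (imported by layout.tsx later in this listing) and is not reproduced here, and the environment variable name is an assumption.

// Illustrative sketch only -- not the repository's actual providers.tsx.
// The env var name is an assumption; "/ingest" matches the rewrites in next.config.js.
import posthog from "posthog-js";

export function initAnalytics(): void {
  if (typeof window === "undefined") return; // posthog-js runs in the browser only
  posthog.init(process.env.NEXT_PUBLIC_POSTHOG_KEY ?? "", {
    // Requests go to /ingest on this origin and are forwarded to us.i.posthog.com
    // by the Next.js rewrites above.
    api_host: "/ingest",
  });
}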
-------------------------------------------------------------------------------- /postcss.config.js: -------------------------------------------------------------------------------- 1 | export default { 2 | plugins: { 3 | tailwindcss: {}, 4 | }, 5 | }; 6 | -------------------------------------------------------------------------------- /prettier.config.js: -------------------------------------------------------------------------------- 1 | /** @type {import('prettier').Config & import('prettier-plugin-tailwindcss').PluginOptions} */ 2 | export default { 3 | plugins: ["prettier-plugin-tailwindcss"], 4 | }; 5 | -------------------------------------------------------------------------------- /public/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ahmedkhaleel2004/gitdiagram/9236492a681b221aff38e3dba9752af83d7823aa/public/favicon.ico -------------------------------------------------------------------------------- /public/og-image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ahmedkhaleel2004/gitdiagram/9236492a681b221aff38e3dba9752af83d7823aa/public/og-image.png -------------------------------------------------------------------------------- /src/app/[username]/[repo]/page.tsx: -------------------------------------------------------------------------------- 1 | "use client"; 2 | 3 | import { useParams } from "next/navigation"; 4 | import MainCard from "~/components/main-card"; 5 | import Loading from "~/components/loading"; 6 | import MermaidChart from "~/components/mermaid-diagram"; 7 | import { useDiagram } from "~/hooks/useDiagram"; 8 | import { ApiKeyDialog } from "~/components/api-key-dialog"; 9 | import { ApiKeyButton } from "~/components/api-key-button"; 10 | import { useState } from "react"; 11 | import { useStarReminder } from "~/hooks/useStarReminder"; 12 | 13 | export default function Repo() { 14 | const [zoomingEnabled, setZoomingEnabled] = useState(false); 15 | const params = useParams<{ username: string; repo: string }>(); 16 | 17 | // Use the star reminder hook 18 | useStarReminder(); 19 | 20 | const { 21 | diagram, 22 | error, 23 | loading, 24 | lastGenerated, 25 | cost, 26 | showApiKeyDialog, 27 | handleModify, 28 | handleRegenerate, 29 | handleCopy, 30 | handleApiKeySubmit, 31 | handleCloseApiKeyDialog, 32 | handleOpenApiKeyDialog, 33 | handleExportImage, 34 | state, 35 | } = useDiagram(params.username.toLowerCase(), params.repo.toLowerCase()); 36 | 37 | return ( 38 |
39 |
40 | setZoomingEnabled(!zoomingEnabled)} 52 | loading={loading} 53 | /> 54 |
55 |
56 | {loading ? ( 57 | 64 | ) : error || state.error ? ( 65 |
66 |

67 | {error || state.error} 68 |

69 | {(error?.includes("API key") || 70 | state.error?.includes("API key")) && ( 71 |
72 | 73 |
74 | )} 75 |
76 | ) : ( 77 |
78 | 79 |
80 | )} 81 |
82 | 83 | 88 |
89 | ); 90 | } 91 | -------------------------------------------------------------------------------- /src/app/_actions/cache.ts: -------------------------------------------------------------------------------- 1 | "use server"; 2 | 3 | import { db } from "~/server/db"; 4 | import { eq, and } from "drizzle-orm"; 5 | import { diagramCache } from "~/server/db/schema"; 6 | import { sql } from "drizzle-orm"; 7 | 8 | export async function getCachedDiagram(username: string, repo: string) { 9 | try { 10 | const cached = await db 11 | .select() 12 | .from(diagramCache) 13 | .where( 14 | and(eq(diagramCache.username, username), eq(diagramCache.repo, repo)), 15 | ) 16 | .limit(1); 17 | 18 | return cached[0]?.diagram ?? null; 19 | } catch (error) { 20 | console.error("Error fetching cached diagram:", error); 21 | return null; 22 | } 23 | } 24 | 25 | export async function getCachedExplanation(username: string, repo: string) { 26 | try { 27 | const cached = await db 28 | .select() 29 | .from(diagramCache) 30 | .where( 31 | and(eq(diagramCache.username, username), eq(diagramCache.repo, repo)), 32 | ) 33 | .limit(1); 34 | 35 | return cached[0]?.explanation ?? null; 36 | } catch (error) { 37 | console.error("Error fetching cached explanation:", error); 38 | return null; 39 | } 40 | } 41 | 42 | export async function cacheDiagramAndExplanation( 43 | username: string, 44 | repo: string, 45 | diagram: string, 46 | explanation: string, 47 | usedOwnKey = false, 48 | ) { 49 | try { 50 | await db 51 | .insert(diagramCache) 52 | .values({ 53 | username, 54 | repo, 55 | diagram, 56 | explanation, 57 | usedOwnKey, 58 | }) 59 | .onConflictDoUpdate({ 60 | target: [diagramCache.username, diagramCache.repo], 61 | set: { 62 | diagram, 63 | explanation, 64 | usedOwnKey, 65 | updatedAt: new Date(), 66 | }, 67 | }); 68 | } catch (error) { 69 | console.error("Error caching diagram:", error); 70 | } 71 | } 72 | 73 | export async function getDiagramStats() { 74 | try { 75 | const stats = await db 76 | .select({ 77 | totalDiagrams: sql`COUNT(*)`, 78 | ownKeyUsers: sql`COUNT(CASE WHEN ${diagramCache.usedOwnKey} = true THEN 1 END)`, 79 | freeUsers: sql`COUNT(CASE WHEN ${diagramCache.usedOwnKey} = false THEN 1 END)`, 80 | }) 81 | .from(diagramCache); 82 | 83 | return stats[0]; 84 | } catch (error) { 85 | console.error("Error getting diagram stats:", error); 86 | return null; 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /src/app/_actions/github.ts: -------------------------------------------------------------------------------- 1 | import { cache } from "react"; 2 | 3 | interface GitHubResponse { 4 | stargazers_count: number; 5 | } 6 | 7 | export const getStarCount = cache(async () => { 8 | try { 9 | const response = await fetch( 10 | "https://api.github.com/repos/ahmedkhaleel2004/gitdiagram", 11 | { 12 | headers: { 13 | Accept: "application/vnd.github.v3+json", 14 | }, 15 | next: { 16 | revalidate: 300, // Cache for 5 minutes 17 | }, 18 | }, 19 | ); 20 | 21 | if (!response.ok) { 22 | throw new Error("Failed to fetch star count"); 23 | } 24 | 25 | const data = (await response.json()) as GitHubResponse; 26 | return data.stargazers_count; 27 | } catch (error) { 28 | console.error("Error fetching star count:", error); 29 | return null; 30 | } 31 | }); 32 | -------------------------------------------------------------------------------- /src/app/_actions/repo.ts: -------------------------------------------------------------------------------- 1 | "use server"; 2 | 3 | import { db } 
from "~/server/db"; 4 | import { eq, and } from "drizzle-orm"; 5 | import { diagramCache } from "~/server/db/schema"; 6 | 7 | export async function getLastGeneratedDate(username: string, repo: string) { 8 | const result = await db 9 | .select() 10 | .from(diagramCache) 11 | .where( 12 | and(eq(diagramCache.username, username), eq(diagramCache.repo, repo)), 13 | ); 14 | 15 | return result[0]?.updatedAt; 16 | } 17 | -------------------------------------------------------------------------------- /src/app/layout.tsx: -------------------------------------------------------------------------------- 1 | import "~/styles/globals.css"; 2 | 3 | import { GeistSans } from "geist/font/sans"; 4 | import { type Metadata } from "next"; 5 | import { Header } from "~/components/header"; 6 | import { Footer } from "~/components/footer"; 7 | import { CSPostHogProvider } from "./providers"; 8 | import { Toaster } from "~/components/ui/sonner"; 9 | 10 | export const metadata: Metadata = { 11 | title: "GitDiagram", 12 | description: 13 | "Turn any GitHub repository into an interactive diagram for visualization in seconds.", 14 | metadataBase: new URL("https://gitdiagram.com"), 15 | keywords: [ 16 | "github", 17 | "git diagram", 18 | "git diagram generator", 19 | "git diagram tool", 20 | "git diagram maker", 21 | "git diagram creator", 22 | "git diagram", 23 | "diagram", 24 | "repository", 25 | "visualization", 26 | "code structure", 27 | "system design", 28 | "software architecture", 29 | "software design", 30 | "software engineering", 31 | "software development", 32 | "software architecture", 33 | "software design", 34 | "software engineering", 35 | "software development", 36 | "open source", 37 | "open source software", 38 | "ahmedkhaleel2004", 39 | "ahmed khaleel", 40 | "gitdiagram", 41 | "gitdiagram.com", 42 | ], 43 | authors: [ 44 | { name: "Ahmed Khaleel", url: "https://github.com/ahmedkhaleel2004" }, 45 | ], 46 | creator: "Ahmed Khaleel", 47 | openGraph: { 48 | type: "website", 49 | locale: "en_US", 50 | url: "https://gitdiagram.com", 51 | title: "GitDiagram - Repository to Diagram in Seconds", 52 | description: 53 | "Turn any GitHub repository into an interactive diagram for visualization.", 54 | siteName: "GitDiagram", 55 | images: [ 56 | { 57 | url: "/og-image.png", // You'll need to create this image 58 | width: 1200, 59 | height: 630, 60 | alt: "GitDiagram - Repository Visualization Tool", 61 | }, 62 | ], 63 | }, 64 | robots: { 65 | index: true, 66 | follow: true, 67 | googleBot: { 68 | index: true, 69 | follow: true, 70 | "max-snippet": -1, 71 | }, 72 | }, 73 | }; 74 | 75 | export default function RootLayout({ 76 | children, 77 | }: Readonly<{ children: React.ReactNode }>) { 78 | return ( 79 | 80 | 81 | 82 |
83 |
{children}
84 |