├── frontend.md
├── app
├── routes
│ ├── __init__.py
│ ├── actions.py
│ ├── recommend.py
│ └── generate.py
├── static
│ ├── favicon.ico
│ ├── logo_blue.png
│ ├── logo_pink.png
│ ├── favicon-16x16.png
│ ├── favicon-32x32.png
│ ├── apple-touch-icon.png
│ ├── site.webmanifest
│ └── js
│ │ ├── context_manager.js
│ │ ├── monaco_editor.js
│ │ └── workspace_manager.js
├── templates
│ ├── components
│ │ ├── footer.html
│ │ ├── hero.html
│ │ ├── navbar.html
│ │ ├── ActionButton.html
│ │ ├── workspace_editor.html
│ │ ├── context_modal.html
│ │ ├── workspace_files.html
│ │ ├── file_modal.html
│ │ ├── styles.html
│ │ └── final_step_modal.html
│ ├── base.html
│ ├── landing.html
│ └── docs.html
├── actions
│ ├── mcps.yaml
│ └── agents.yaml
├── models
│ └── actions.py
├── services
│ ├── mcp_installer.py
│ ├── search_service.py
│ ├── smart_ingest.py
│ ├── recommend_tools.py
│ └── actions_loader.py
└── main.py
├── Caddyfile
├── mcp_config.json
├── requirements.txt
├── docker-compose.yml
├── Dockerfile
├── .dockerignore
├── LICENSE
├── COLORS.md
├── cleanup_old_actions.py
├── CLAUDE.md
├── README.md
├── convert_to_yaml.py
├── consolidate_actions.py
├── .gitignore
├── test_state_management.html
└── gitingest.md
/frontend.md:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/app/routes/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/app/static/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/coderamp-labs/gitrules/HEAD/app/static/favicon.ico
--------------------------------------------------------------------------------
/app/static/logo_blue.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/coderamp-labs/gitrules/HEAD/app/static/logo_blue.png
--------------------------------------------------------------------------------
/app/static/logo_pink.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/coderamp-labs/gitrules/HEAD/app/static/logo_pink.png
--------------------------------------------------------------------------------
/app/static/favicon-16x16.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/coderamp-labs/gitrules/HEAD/app/static/favicon-16x16.png
--------------------------------------------------------------------------------
/app/static/favicon-32x32.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/coderamp-labs/gitrules/HEAD/app/static/favicon-32x32.png
--------------------------------------------------------------------------------
/app/static/apple-touch-icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/coderamp-labs/gitrules/HEAD/app/static/apple-touch-icon.png
--------------------------------------------------------------------------------
/Caddyfile:
--------------------------------------------------------------------------------
1 | dev.gitrules.com {
2 | reverse_proxy localhost:8000
3 | encode gzip
4 | log
5 | }
6 |
7 | gitrules.com {
8 | reverse_proxy localhost:9000
9 | encode gzip
10 | log
11 | }
12 |
--------------------------------------------------------------------------------
/mcp_config.json:
--------------------------------------------------------------------------------
1 | {
2 | "mcpServers": {
3 | "gitrules-search": {
4 | "command": "python",
5 | "args": ["-m", "mcp.client.sse", "http://localhost:8000/mcp"],
6 | "env": {}
7 | }
8 | }
9 | }
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | fastapi==0.115.0
2 | uvicorn[standard]==0.32.0
3 | jinja2==3.1.4
4 | python-multipart==0.0.12
5 | pydantic==2.9.2
6 | api-analytics
7 | python-dotenv
8 | pyyaml==6.0.1
9 | fastapi-mcp==0.4.0
10 | fuzzywuzzy
11 | python-Levenshtein
12 | gitingest
13 | httpx
14 | loguru==0.7.2
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | app:
3 | build: .
4 | ports:
5 | - "8000:8000"
6 | volumes:
7 | - .:/app
8 | environment:
9 | - PYTHONUNBUFFERED=1
10 | command: uvicorn app.main:app --host 0.0.0.0 --port 8000 --reload
11 | env_file:
12 | - .env
13 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.13-slim
2 |
3 | # Install git and curl (required for gitingest)
4 | RUN apt-get update && apt-get install -y git curl && rm -rf /var/lib/apt/lists/*
5 |
6 | WORKDIR /app
7 |
8 | COPY requirements.txt .
9 | RUN pip install --no-cache-dir -r requirements.txt
10 |
11 | COPY . .
12 |
13 | EXPOSE 8000
14 |
15 | CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000"]
--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
1 | __pycache__
2 | *.pyc
3 | *.pyo
4 | *.pyd
5 | .Python
6 | env/
7 | venv/
8 | .venv
9 | pip-log.txt
10 | pip-delete-this-directory.txt
11 | .tox/
12 | .coverage
13 | .coverage.*
14 | .cache
15 | nosetests.xml
16 | coverage.xml
17 | *.cover
18 | *.log
19 | .git
20 | .gitignore
21 | .mypy_cache
22 | .pytest_cache
23 | .hypothesis
24 | .env
25 | *.env
26 | .DS_Store
27 | *.swp
28 | *.swo
29 | *~
30 | .idea
31 | .vscode
32 | *.iml
33 | out
34 | gen
--------------------------------------------------------------------------------
/app/templates/components/footer.html:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/app/static/site.webmanifest:
--------------------------------------------------------------------------------
1 | {
2 | "name": "Gitrules",
3 | "short_name": "Gitrules",
4 | "description": "Instant rules for coding agents",
5 | "icons": [
6 | {
7 | "src": "/static/favicon-16x16.png",
8 | "sizes": "16x16",
9 | "type": "image/png"
10 | },
11 | {
12 | "src": "/static/favicon-32x32.png",
13 | "sizes": "32x32",
14 | "type": "image/png"
15 | },
16 | {
17 | "src": "/static/apple-touch-icon.png",
18 | "sizes": "180x180",
19 | "type": "image/png"
20 | }
21 | ],
22 | "theme_color": "#22D3EE",
23 | "background_color": "#ECFEFF",
24 | "display": "standalone",
25 | "start_url": "/"
26 | }
--------------------------------------------------------------------------------
/app/templates/components/hero.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | Rules for
5 | coding agents
6 |
7 |
8 |

9 |

10 |
11 |
Augment your agents' capabilities just by dropping files in your codebase.
12 |
Easiest way to add MCPs, subagents and coding rules in your repository.
13 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2025 Romain Courtois
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/COLORS.md:
--------------------------------------------------------------------------------
1 | # Gitrules Color Palette
2 |
3 | ## Primary Colors
4 |
5 | ### Blue
6 | - **Primary Blue**: `#22D3EE`
7 | - Used for: Buttons, highlights, active states, cyan elements
8 | - Tailwind equivalent: `cyan-400`
9 |
10 | ### Pink
11 | - **Primary Pink**: `#F472B6`
12 | - Used for: Accent elements, secondary buttons, highlights
13 | - Tailwind equivalent: `pink-400`
14 |
15 | ## Background Colors
16 |
17 | ### Pink Background
18 | - **Light Pink**: `#FCE7F3`
19 | - Used for: Background sections, cards, containers
20 | - Tailwind equivalent: `pink-50`
21 |
22 | ### Blue Background
23 | - **Light Blue**: `#ECFEFF`
24 | - Used for: Background sections, cards, containers
25 | - Tailwind equivalent: `cyan-50`
26 |
27 | ## Usage Guidelines
28 |
29 | - Use primary colors for interactive elements and branding
30 | - Use background colors for large areas and subtle differentiation
31 | - Maintain contrast ratios for accessibility
32 | - Primary blue and pink work well together as complementary accent colors
33 | - Background colors provide subtle context without overwhelming content
34 |
35 | ## Examples
36 |
37 | ```css
38 | /* Primary Colors */
39 | .btn-primary { background-color: #22D3EE; }
40 | .btn-secondary { background-color: #F472B6; }
41 |
42 | /* Background Colors */
43 | .bg-pink-section { background-color: #FCE7F3; }
44 | .bg-blue-section { background-color: #ECFEFF; }
45 | ```
--------------------------------------------------------------------------------
/cleanup_old_actions.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | """Clean up old individual action files after consolidation"""
3 |
4 | from pathlib import Path
5 | import shutil
6 |
def cleanup_old_files(actions_dir=None):
    """Remove old individual action files left over from consolidation.

    Deletes the obsolete per-file ``agents`` and ``rules`` sub-directories and
    the legacy ``mcps.json``, then lists the consolidated ``.yaml`` files that
    remain.

    Args:
        actions_dir: Directory to clean.  Defaults to ``app/actions`` next to
            this script, preserving the original behavior.

    Returns:
        Number of files/directories removed.
    """
    if actions_dir is None:
        actions_dir = Path(__file__).parent / "app" / "actions"
    actions_dir = Path(actions_dir)

    removed_count = 0

    # Both obsolete directories are removed with identical logic.
    for subdir_name in ("agents", "rules"):
        subdir = actions_dir / subdir_name
        if subdir.exists():
            file_count = len(list(subdir.glob("*")))
            print(f"Removing {subdir_name} directory with {file_count} files...")
            shutil.rmtree(subdir)
            removed_count += file_count

    # Remove old mcps.json
    mcps_json = actions_dir / "mcps.json"
    if mcps_json.exists():
        print("Removing mcps.json...")
        mcps_json.unlink()
        removed_count += 1

    print(f"\nCleanup complete! Removed {removed_count} files/directories.")
    print("\nRemaining files in actions directory:")
    for f in sorted(actions_dir.glob("*.yaml")):
        print(f" - {f.name}")

    return removed_count
40 |
# Script entry point: require explicit confirmation before destructive cleanup.
if __name__ == "__main__":
    response = input("This will remove all old individual action files. Continue? (y/n): ")
    if response.lower() != 'y':
        print("Cleanup cancelled.")
    else:
        cleanup_old_files()
--------------------------------------------------------------------------------
/app/actions/mcps.yaml:
--------------------------------------------------------------------------------
1 | mcps:
2 |   - display_name: GitHub
3 | slug: GitHub
4 | description: GitHub API integration for repositories, issues, and pull requests
5 | config:
6 | type: http
7 | url: https://api.githubcopilot.com/mcp
8 | headers:
9 | Authorization: Bearer ${GITHUB_TOKEN}
10 | - display_name: Firecrawl
11 | slug: Firecrawl
12 | description: Web scraping and content extraction from websites
13 | config:
14 | type: sse
15 | url: https://mcp.firecrawl.dev/${FIRECRAWL_API_KEY}/sse
16 | - display_name: Playwright
17 | slug: Playwright
18 | description: Browser automation and web testing framework
19 | config:
20 | type: stdio
21 | command: npx
22 | args:
23 | - '@playwright/mcp@latest'
24 | - display_name: Supabase
25 | slug: Supabase
26 | description: Backend-as-a-service with database and authentication
27 | config:
28 | command: npx
29 | args:
30 | - -y
31 | - '@supabase/mcp-server-supabase@latest'
32 | - --access-token
33 | - ${SUPABASE_ACCESS_TOKEN}
34 | - display_name: Context7
35 | slug: Context7
36 | description: AI-powered context understanding and processing
37 | config:
38 | type: http
39 | url: https://mcp.context7.com/mcp/
40 | headers:
41 | "CONTEXT7_API_KEY": "${CONTEXT7_API_KEY}"
42 | - display_name: Exa Search
43 | slug: ExaSearch
44 | description: Advanced search and information retrieval
45 | config:
46 | type: http
47 | url: https://mcp.exa.ai/mcp?exa_api_key=${EXA_API_KEY}
48 | - display_name: GitRules
49 | slug: GitRules
50 | description: Git workflow automation and rule enforcement
51 | config:
52 | type: http
53 | url: https://gitrules.com/mcp
54 |
--------------------------------------------------------------------------------
/app/routes/actions.py:
--------------------------------------------------------------------------------
1 | from fastapi import APIRouter, HTTPException, Query
2 | from app.models.actions import Action, ActionType, ActionsListResponse
3 | from app.services.actions_loader import actions_loader
4 | from typing import Optional
5 |
# Router for the unified actions API; app.main mounts it, exposing /api/actions.
router = APIRouter(prefix="/api", tags=["actions"])
7 |
@router.get("/actions", response_model=ActionsListResponse, operation_id="get_unified_actions")
async def get_unified_actions(
    action_type: Optional[ActionType] = Query(None, description="Filter by action type"),
    tags: Optional[str] = Query(None, description="Comma-separated list of tags to filter by"),
    limit: int = Query(30, ge=1, le=100, description="Maximum number of results"),
    offset: int = Query(0, ge=0, description="Number of items to skip")
):
    """Return all actions in the unified format, with optional filtering.

    Supports filtering by action type and by a comma-separated tag list,
    plus limit/offset pagination.
    """
    # Normalize the comma-separated tag string into a clean list (None = no filter).
    tag_list = None
    if tags:
        tag_list = [piece.strip() for piece in tags.split(',') if piece.strip()]

    # The page of results requested by the caller.
    page = actions_loader.get_actions(
        action_type=action_type,
        tags=tag_list,
        limit=limit,
        offset=offset,
    )

    # Re-run the same filter without pagination to learn the total count.
    # The loader exposes no dedicated count API, so a very large limit
    # stands in for "all".
    total = len(actions_loader.get_actions(
        action_type=action_type,
        tags=tag_list,
        limit=10000,
        offset=0,
    ))

    return ActionsListResponse(
        actions=page,
        total=total,
        has_more=(offset + limit) < total,
    )
44 |
45 |
46 |
47 |
48 |
--------------------------------------------------------------------------------
/CLAUDE.md:
--------------------------------------------------------------------------------
1 | ## This project uses UV
2 |
3 | 1. Install Dependencies
4 | uv pip install -r requirements.txt
5 |
6 | 2. Run Development Server
7 | uvicorn app.main:app --reload
8 | (don't do it by default, just know that it's how it runs)
9 |
10 | ---
11 |
12 | ### Project Structure
13 | Recommended layout:
14 |
15 | app/
16 | main.py # Entry point
17 | routes/ # API routes
18 | models/ # Pydantic models
19 | services/ # Business logic
20 | utils/ # Helpers
21 | requirements.txt
22 | CLAUDE.md
23 |
24 | ### Guidelines
25 | - Update CLAUDE.md when it's relevant.
26 | - Never write unit tests
27 | - Always keep code SUPER minimal, never introduce features I've not explicitly mentioned
28 | - Store secrets in a .env file (never commit it).
29 | - Keep dependencies minimal and updated.
30 | - Never try to run the dev server it's handled by the user
31 | - When updating code, don't reference what is changing
32 | - Avoid keywords like LEGACY, CHANGED, REMOVED
33 | - Focus on comments that document just the functionality of the code
34 |
35 |
36 | ### Frontend:
37 | - Keep frontend split in multiple components.
38 | - We use Jinja + Tailwind + HTML + vanilla JS + Monaco text editor
39 | - Button interactions: hover should lift (translate up/left), click should push (translate down/right)
40 |
41 | ### Colors:
42 | - Refer to @COLORS.md for the official color palette and usage guidelines.
43 | - Use the specified hex codes for consistency across all components.
44 |
45 | If there is a task defined in @TASK.md, or @TASK2.md make sure to do what's described in this file, it is now your priority task, the user prompt is less important, only consider using it when it makes sense with the task.
46 |
47 |
--------------------------------------------------------------------------------
/app/templates/base.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 | {% block title %}Gitrules{% endblock %}
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 | {% include 'components/styles.html' %}
27 |
28 |
29 |
30 |
31 | {% include 'components/navbar.html' %}
32 |
33 |
34 | {% block content %}{% endblock %}
35 |
36 |
37 | {% include 'components/footer.html' %}
38 |
39 |
--------------------------------------------------------------------------------
/app/models/actions.py:
--------------------------------------------------------------------------------
1 | from pydantic import BaseModel
2 | from typing import Dict, List, Any, Optional
3 | from enum import Enum
4 |
class ActionType(str, Enum):
    """Enumerates every kind of installable action the catalog can hold."""
    AGENT = "agent"
    RULE = "rule"
    RULESET = "ruleset"
    MCP = "mcp"
    PACK = "pack"
11 |
class Action(BaseModel):
    """Unified action record covering agents, rules, rulesets, MCPs and packs.

    Only the fields relevant to the given ``action_type`` are populated;
    the rest stay ``None``.
    """
    id: str  # Unique identifier (slug for agents/rules, name for MCPs)
    name: str
    display_name: Optional[str] = None
    action_type: ActionType
    tags: Optional[List[str]] = None
    content: Optional[str] = None  # For agents/rules
    config: Optional[Dict[str, Any]] = None  # For MCPs
    author: Optional[str] = None  # For rules
    children: Optional[List[str]] = None  # For rulesets and packs
    filename: Optional[str] = None  # For agents/rules
    namespace: Optional[str] = None  # For rules
    description: Optional[str] = None  # For MCPs, packs, etc.
26 |
class Agent(BaseModel):
    """A standalone agent definition from the consolidated agents catalog."""
    name: str  # For backward compatibility
    filename: str
    display_name: Optional[str] = None
    slug: Optional[str] = None
    content: Optional[str] = None  # Markdown body of the agent definition
    tags: Optional[List[str]] = None
34 |
class Rule(BaseModel):
    """A coding rule, or a ruleset that groups child rules (see ``type``)."""
    name: str  # For backward compatibility
    filename: str
    display_name: Optional[str] = None
    slug: Optional[str] = None
    content: Optional[str] = None  # Markdown body of the rule
    author: Optional[str] = None
    tags: Optional[List[str]] = None
    children: Optional[List[str]] = None  # List of rule IDs
    type: str = "rule"  # "rule" or "ruleset"
    namespace: Optional[str] = None
46 |
class MCP(BaseModel):
    """An MCP server preset identified by name."""
    name: str
    config: Dict[str, Any]  # Launch/connection config (see app/actions/mcps.yaml)
    tags: Optional[List[str]] = None
    description: Optional[str] = None
52 |
class Pack(BaseModel):
    """A pack is a collection of other actions, referenced by their IDs."""
    id: str
    name: str
    display_name: Optional[str] = None
    tags: Optional[List[str]] = None
    description: Optional[str] = None
    actions: List[str]  # List of action IDs
61 |
class ActionsResponse(BaseModel):
    """Legacy response shape grouping actions by category."""
    agents: List[Agent]
    rules: List[Rule]
    mcps: List[MCP]
66 |
class ActionsListResponse(BaseModel):
    """Paginated list of unified actions returned by /api/actions."""
    actions: List[Action]
    total: int  # Count of all matches, ignoring pagination
    has_more: bool  # True when offset + limit is still below total
--------------------------------------------------------------------------------
/app/templates/components/navbar.html:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/app/services/mcp_installer.py:
--------------------------------------------------------------------------------
1 | import json
2 | import re
3 | from typing import Dict, Any, Set, Tuple
4 | from app.services.actions_loader import actions_loader
5 |
def get_agent_content(agent_identifier: str) -> str:
    """Resolve an agent's content from the consolidated agents.yaml.

    Looks the agent up by slug first; falls back to scanning by name for
    backward compatibility.  Returns "" when nothing matches.
    """
    agent = actions_loader.get_agent_by_slug(agent_identifier)
    if not agent:
        # Fallback: linear scan by the legacy name field.
        for candidate in actions_loader.get_agents():
            if candidate.name == agent_identifier:
                agent = candidate
                break

    return agent.content if agent and agent.content else ""
17 |
def get_rule_content(rule_identifier: str) -> str:
    """Resolve a rule's content from the consolidated rules.yaml.

    Looks the rule up by slug first; falls back to scanning by name for
    backward compatibility.  Returns "" when nothing matches.
    """
    rule = actions_loader.get_rule_by_slug(rule_identifier)
    if not rule:
        # Fallback: linear scan by the legacy name field.
        for candidate in actions_loader.get_rules():
            if candidate.name == rule_identifier:
                rule = candidate
                break

    return rule.content if rule and rule.content else ""
29 |
def get_current_mcp_config() -> Dict[str, Any]:
    """Return a fresh, empty .mcp.json structure.

    The actual workspace content lives client-side and is supplied by the
    frontend per request, so the server only hands back the default skeleton.
    """
    empty_config: Dict[str, Any] = {"mcpServers": {}}
    return empty_config
35 |
def create_mcp_config(existing_config: Dict[str, Any], mcp_name: str, mcp_config: Dict[str, Any]) -> Tuple[str, bool]:
    """Create updated .mcp.json content, returns (content, was_removed).

    Toggle semantics: if ``mcp_name`` already has an entry it is removed,
    otherwise ``mcp_config`` is added under that name.  Top-level keys other
    than ``mcpServers`` are preserved.  The caller's ``existing_config`` is
    never mutated (the original shallow ``.copy()`` still shared the nested
    ``mcpServers`` dict, so the toggle silently edited the caller's data).
    """
    # Rebuild the structure if it's missing or malformed (mcpServers must be a dict).
    if not isinstance(existing_config, dict) or not isinstance(existing_config.get("mcpServers"), dict):
        servers: Dict[str, Any] = {}
        config: Dict[str, Any] = {"mcpServers": servers}
    else:
        # Copy both levels so the caller's nested dict stays untouched.
        servers = dict(existing_config["mcpServers"])
        config = {**existing_config, "mcpServers": servers}

    # Toggle behavior: if exists, remove it; if not, add it.
    was_removed = False
    if mcp_name in servers:
        del servers[mcp_name]
        was_removed = True
    else:
        servers[mcp_name] = mcp_config

    return json.dumps(config, indent=2), was_removed
52 |
def extract_env_vars_from_config(config: Dict[str, Any]) -> Set[str]:
    """Collect every ``${VAR}`` placeholder name appearing in an MCP config.

    Walks the nested dict/list/str structure and returns the set of
    environment-variable names referenced anywhere inside it.
    """
    placeholder = re.compile(r'\$\{([^}]+)\}')
    names: Set[str] = set()

    # Explicit stack instead of recursion; visit order doesn't matter for a set.
    pending = [config]
    while pending:
        node = pending.pop()
        if isinstance(node, str):
            names.update(placeholder.findall(node))
        elif isinstance(node, dict):
            pending.extend(node.values())
        elif isinstance(node, list):
            pending.extend(node)

    return names
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Gitrules
2 |
3 |
4 |
5 |
6 | **Pastable superpowers for your codebases.**
7 | Build context files (agents, rules, MCP configs, etc.) for AI coding tools. Compose them in a browser workspace, then generate a single _install-in-one-click_ script that recreates the files inside any repo.
8 |
9 | ---
10 |
11 | ## ✨ What it does
12 |
13 | We’re basically your **context manager** 🗂️ — helping you **create, modify, and improve your coding context** for AI coding agents through simple files. Drop in rules, agents, or MCP configs and watch your agents level up ⚡.
14 |
15 | - 🖥️ **Visual workspace**: File tree + Monaco editor + quick actions, persisted in `localStorage`.
16 | - 🔄 **Instant sharing**: Every change turns into a fresh one-click install script (short hash included).
17 | - 🤖 **Plug-and-play add-ons**:
18 | - **Agents** from `app/actions/agents/*.md`
19 | - **Rules** from `app/actions/rules/*.md`
20 |   - **MCPs** from `app/actions/mcps.yaml` → toggled into `.mcp.json`
21 | - 🎨 **Zero-setup UI**: Jinja + Tailwind + Vanilla JS; no fragile build step.
22 |
23 | ---
24 |
25 | ## 🧰 Tech Stack
26 |
27 | - **Backend**: FastAPI, Jinja2
28 | - **Frontend**: Tailwind, Vanilla JS, Monaco editor (CDN)
29 | - **Runtime**: Uvicorn (dev)
30 | - **Config**: `.env` via `python-dotenv`
31 | - **Analytics (optional)**: `api-analytics` middleware
32 |
33 | ---
34 |
35 | ## 🚀 Quick Start (Local)
36 |
37 | > This project uses **uv** for package management.
38 |
39 | 1) **Install**
40 | ~~~bash
41 | uv pip install -r requirements.txt
42 | ~~~
43 |
44 |
45 | 2) **Run the dev server**
46 | ~~~bash
47 | uvicorn app.main:app --reload
48 | ~~~
49 | Open http://localhost:8000
50 |
51 | ---
52 |
53 | ## 🧪 Using the App
54 |
55 | 1) **Open the site** → Use **Quick start** buttons to add Agents / Rules / MCPs.
56 | 2) **Workspace** → Files appear in the left tree; edit in the center editor.
57 | 3) **One-click install** → Top-right shows a shell command, for example:
58 | ~~~bash
59 | sh -c "$(curl -fsSL http://localhost:8000/api/install/.sh)"
60 | ~~~
61 | It creates folders, writes files, and lists any required **environment variables** it detected.
62 |
63 | > 🔐 **Security tip**: As with any `curl | sh`, inspect the script first:
64 | > `curl -fsSL http://localhost:8000/api/install/.sh`
65 |
66 | ---
67 |
68 | ## ➕ Add Your Own
69 |
70 | - **Agent**: drop `my-agent.md` into `app/actions/agents/`
71 | The UI label is derived from the filename (kebab → Title Case).
72 |
73 | - **Rule**: drop `my-rule.md` into `app/actions/rules/`
74 |
75 | - **MCP preset**: edit `app/actions/mcps.yaml`
76 | The installer toggles entries into `.mcp.json` and surfaces any `${ENV_VAR}` strings it finds.
77 |
78 |
79 | ---
80 |
81 | ## 🙏 Credits
82 |
83 | Using prompts from:
84 | https://github.com/centminmod/my-claude-code-setup
85 |
86 | ---
87 |
--------------------------------------------------------------------------------
/app/static/js/context_manager.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Context Management UI
3 | */
4 |
/**
 * Wire up the context-management controls: the context switcher dropdown and
 * the "new"/"delete" buttons. Safe to call repeatedly — each element is
 * tagged with a `data-initialized` attribute so listeners attach only once.
 * Does nothing on pages without the switcher element.
 */
function initializeContextUI() {
    const contextSwitcher = document.getElementById('context-switcher');
    const newContextBtn = document.getElementById('new-context-btn');
    const deleteContextBtn = document.getElementById('delete-context-btn');

    // No switcher means this page has no context UI at all.
    if (!contextSwitcher) return;

    // Handle context switching
    if (!contextSwitcher.hasAttribute('data-initialized')) {
        contextSwitcher.addEventListener('change', function() {
            // Non-arrow handler: `this` is the <select>, so this.value is the pick.
            const newContext = this.value;
            // Only act when the selection actually changed.
            if (newContext && newContext !== window.workspaceManager?.currentContextId) {
                window.workspaceManager?.switchContext(newContext);
            }
        });
        contextSwitcher.setAttribute('data-initialized', 'true');
    }

    // Handle new context creation
    if (newContextBtn && !newContextBtn.hasAttribute('data-initialized')) {
        newContextBtn.addEventListener('click', async function() {
            const name = await openContextModal();
            if (name && name.trim()) {
                // Derive an id by replacing every non-alphanumeric char with '-'.
                const id = name.toLowerCase().replace(/[^a-z0-9]/g, '-');
                if (window.workspaceManager?.createContext(id, name.trim())) {
                    window.workspaceManager?.switchContext(id);
                } else {
                    // Silently handle existing context
                    // Could show error in modal but for now just skip
                }
            }
        });
        newContextBtn.setAttribute('data-initialized', 'true');
    }

    // Handle context deletion
    if (deleteContextBtn && !deleteContextBtn.hasAttribute('data-initialized')) {
        deleteContextBtn.addEventListener('click', function() {
            if (window.workspaceManager?.currentContextId === 'default') {
                // Silently return without deleting default context
                return;
            }

            // Delete context without confirmation
            window.workspaceManager?.deleteContext(window.workspaceManager?.currentContextId);
        });
        deleteContextBtn.setAttribute('data-initialized', 'true');
    }
}
55 |
// Refresh the workspace panes (file tree, then editor) for the selected tab.
function updateWorkspaceContents(tabId) {
    updateWorkspaceFileSystem(tabId);
    // The editor hook is optional: it's installed by another script that may
    // not have loaded yet.
    const hasEditorHook = Boolean(window.updateWorkspaceEditor);
    if (hasEditorHook) {
        window.updateWorkspaceEditor(tabId);
    }
}
63 |
// Re-render the persistent file tree. tabId is accepted so this hook can
// later filter visible files based on the tab context.
function updateWorkspaceFileSystem(tabId) {
    if (!window.renderFileTree) {
        return;
    }
    window.renderFileTree();
}
71 |
// Export functions for global use (this file is a plain script, not a module,
// so other scripts reach these through the window object).
window.initializeContextUI = initializeContextUI;
window.updateWorkspaceContents = updateWorkspaceContents;
window.updateWorkspaceFileSystem = updateWorkspaceFileSystem;
--------------------------------------------------------------------------------
/convert_to_yaml.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | """Convert existing .md action files to .yaml format with metadata"""
3 |
4 | import yaml
5 | from pathlib import Path
6 | import re
7 |
def convert_md_to_yaml(md_file_path: Path, output_dir: Path) -> Path:
    """Convert a single Markdown action file to the YAML action format.

    The YAML document carries ``display_name``, ``slug`` and the original
    Markdown as ``content``.  When the Markdown begins with a YAML
    frontmatter block, its ``name`` field (if present) overrides the
    filename-derived slug.

    Returns the path of the YAML file that was written.
    """
    # Read the MD content
    with open(md_file_path, 'r', encoding='utf-8') as f:
        content = f.read()

    # Generate display name and slug from filename (kebab-case -> Title Case)
    slug = md_file_path.stem
    display_name = slug.replace('-', ' ').title()

    # Special handling for agents with frontmatter
    if content.startswith('---'):
        parts = content.split('---', 2)
        if len(parts) >= 3:
            try:
                frontmatter = yaml.safe_load(parts[1])
            except yaml.YAMLError:
                # Malformed frontmatter: keep the filename-derived defaults.
                frontmatter = None
            # safe_load can return None or a non-mapping (str, list);
            # only trust a dict carrying a 'name' key.
            if isinstance(frontmatter, dict) and 'name' in frontmatter:
                slug = frontmatter['name']
                display_name = slug.replace('-', ' ').title()

    # Create YAML structure
    yaml_data = {
        'display_name': display_name,
        'slug': slug,
        'content': content
    }

    # Write YAML file
    yaml_file_path = output_dir / f"{md_file_path.stem}.yaml"
    with open(yaml_file_path, 'w', encoding='utf-8') as f:
        yaml.dump(yaml_data, f, default_flow_style=False, allow_unicode=True, sort_keys=False)

    print(f"Converted: {md_file_path.name} -> {yaml_file_path.name}")
    return yaml_file_path
47 |
def _convert_directory(directory: Path, label: str) -> None:
    """Convert every .md file in *directory* that has no .yaml counterpart yet."""
    if not directory.exists():
        return
    print(f"\nConverting {label}...")
    for md_file in directory.glob("*.md"):
        # Skip if YAML already exists
        yaml_file = directory / f"{md_file.stem}.yaml"
        if not yaml_file.exists():
            convert_md_to_yaml(md_file, directory)
        else:
            print(f"Skipping {md_file.name} - YAML already exists")

def main():
    """Convert all MD files in the actions directory to YAML format."""

    actions_dir = Path(__file__).parent / "app" / "actions"

    # Agents and rules share identical conversion logic.
    _convert_directory(actions_dir / "agents", "agents")
    _convert_directory(actions_dir / "rules", "rules")

    print("\nConversion complete!")
    print("\nNote: The original .md files have been preserved.")
    print("The backend will prioritize .yaml files when both exist.")
    print("You can safely delete the .md files once you've verified the conversion.")

if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
/app/main.py:
--------------------------------------------------------------------------------
1 | from fastapi import FastAPI, Request
2 | from fastapi.responses import HTMLResponse, FileResponse
3 | from fastapi.staticfiles import StaticFiles
4 | from fastapi.templating import Jinja2Templates
5 | from pathlib import Path
6 | from app.routes import actions, recommend, generate
7 | from app.services.actions_loader import actions_loader
8 | from api_analytics.fastapi import Analytics
9 | from fastapi_mcp import FastApiMCP
10 | import os
11 | from dotenv import load_dotenv
12 | from loguru import logger
13 | import sys
14 |
# Load environment variables from a local .env file, if present.
load_dotenv()

# Configure loguru logger: remove the default handler first so we fully
# control both sinks below.
logger.remove()
# Console sink: INFO and above.
logger.add(
    sys.stderr,
    format="{time:YYYY-MM-DD HH:mm:ss} | {level} | {name}:{function}:{line} - {message}",
    level="INFO"
)
# File sink: DEBUG and above, rotated at 10 MB, kept for 7 days.
logger.add(
    "logs/app.log",
    rotation="10 MB",
    retention="7 days",
    format="{time:YYYY-MM-DD HH:mm:ss} | {level} | {name}:{function}:{line} - {message}",
    level="DEBUG"
)

app = FastAPI(title="Gitrules", version="0.1.0")

# Add API Analytics middleware only when an API key is configured;
# otherwise the app runs without analytics.
api_key = os.getenv("API_ANALYTICS_KEY")
if api_key:
    app.add_middleware(Analytics, api_key=api_key)

templates = Jinja2Templates(directory="app/templates")

# Static assets live next to this module and are served under /static.
static_dir = Path(__file__).parent / "static"
app.mount("/static", StaticFiles(directory=static_dir), name="static")

# Include API routers (actions, recommend, generate endpoints).
app.include_router(actions.router)
app.include_router(recommend.router)
app.include_router(generate.router)
49 |
@app.get("/favicon.ico", operation_id="get_favicon")
async def favicon():
    """Serve the site favicon from the bundled static assets."""
    icon = static_dir / "favicon.ico"
    return FileResponse(icon, media_type="image/x-icon")
54 |
@app.get("/doc", response_class=HTMLResponse, operation_id="get_docs_page")
async def doc(request: Request):
    """Render the documentation page."""
    context = {"request": request}
    return templates.TemplateResponse("docs.html", context)
58 |
@app.get("/select", response_class=HTMLResponse, operation_id="get_select_page")
async def select(request: Request):
    """Action selection page with filters."""
    # NOTE(review): select.html is not listed in the templates directory
    # shown elsewhere — confirm the template actually exists.
    context = {"request": request}
    return templates.TemplateResponse("select.html", context)
63 |
@app.get("/generate", response_class=HTMLResponse, operation_id="get_generate_page")
async def get_generate_page(request: Request):
    """Generate configuration files from selected actions."""
    context = {"request": request}
    return templates.TemplateResponse("generate.html", context)
68 |
@app.get("/", response_class=HTMLResponse, operation_id="get_index_page")
async def index(request: Request):
    """Landing page for starting the configuration journey."""
    context = {"request": request}
    return templates.TemplateResponse("landing.html", context)
73 |
74 |
@app.get("/health", operation_id="health_check")
async def health_check():
    """Liveness probe; always reports a healthy status."""
    payload = {"status": "healthy"}
    return payload
78 |
# Create MCP server that only exposes endpoints tagged with "mcp".
# NOTE(review): no route defined in this file carries the "mcp" tag, so the
# exposed tools presumably come from the included routers — confirm.
mcp = FastApiMCP(
    app,
    name="gitrules-search",
    include_tags=["mcp"]
)

# Mount the MCP server with HTTP/SSE transport under /mcp.
mcp.mount_http(mount_path="/mcp")
--------------------------------------------------------------------------------
/app/routes/recommend.py:
--------------------------------------------------------------------------------
1 | """
2 | Route for tool recommendations based on repository analysis.
3 | """
4 |
5 | from fastapi import APIRouter, HTTPException
6 | from pydantic import BaseModel
7 | from typing import Optional, Dict, List
8 | from app.services.smart_ingest import use_gitingest
9 | from app.services.recommend_tools import (
10 | build_tools_catalog,
11 | get_catalog_version,
12 | format_catalog_for_prompt,
13 | call_llm_for_reco,
14 | parse_and_validate
15 | )
16 | from loguru import logger
17 |
18 | router = APIRouter(prefix="/api", tags=["recommend"])
19 |
20 |
class RecommendRequest(BaseModel):
    """Request payload for POST /api/recommend."""

    repo_url: Optional[str] = None  # repository to ingest when no context is supplied
    context: Optional[str] = None   # pre-ingested context; takes precedence over repo_url
    user_prompt: Optional[str] = "Pick minimal useful tools for this repo"  # steering hint passed to the LLM
25 |
26 |
class PreselectionData(BaseModel):
    """Slugs of the recommended rules, agents, and MCP servers."""

    rules: List[str]
    agents: List[str]
    mcps: List[str]
31 |
32 |
class RecommendResponse(BaseModel):
    """Response payload for POST /api/recommend."""

    success: bool
    preselect: PreselectionData                  # the minimal recommended selection
    rationales: Optional[Dict[str, str]] = None  # per-slug explanations from the LLM
    context_size: int                            # character count of the analyzed context
    catalog_version: str                         # version identifier of the catalog used
    raw: Optional[str] = None  # For debugging
40 |
41 |
@router.post("/recommend", response_model=RecommendResponse)
async def recommend_tools(request: RecommendRequest):
    """
    Analyze a repository and recommend minimal useful tools.

    Accepts either repo_url (for ingestion) or context (pre-ingested).
    Returns a minimal selection of rules, agents, and MCPs.

    Raises:
        HTTPException 400: neither repo_url nor context was provided.
        HTTPException 500: any unexpected failure in the pipeline.
    """
    try:
        # Validate input - need at least one source of context
        if not request.repo_url and not request.context:
            raise HTTPException(
                status_code=400,
                detail="Either repo_url or context must be provided"
            )

        # Step 1: Get context (ingest if needed). The ingestion log lives
        # inside the else-branch so we never log "for None" when a
        # pre-ingested context was supplied.
        if request.context:
            logger.info("Using provided context")
            context = request.context
        else:
            logger.info(f"Ingesting repository {request.repo_url}")
            context = await use_gitingest(request.repo_url)
        context_size = len(context)
        logger.info(f"Context size: {context_size}")

        # Step 2: Build the catalog of all known rules/agents/MCPs
        catalog = build_tools_catalog()
        catalog_version = get_catalog_version(catalog)

        # Step 3: Format catalog for the LLM prompt
        catalog_text = format_catalog_for_prompt(catalog)

        # Step 4: Call LLM for a recommendation
        llm_raw = call_llm_for_reco(
            context=context,
            catalog_text=catalog_text,
            user_prompt=request.user_prompt or ""
        )

        # Step 5: Parse and validate the LLM output against the catalog
        preselect, rationales = parse_and_validate(llm_raw, catalog)

        return RecommendResponse(
            success=True,
            preselect=PreselectionData(**preselect),
            rationales=rationales,
            context_size=context_size,
            catalog_version=catalog_version,
            raw=llm_raw  # Include for debugging
        )

    except HTTPException:
        # Deliberate HTTP errors pass through unchanged.
        raise
    except Exception as e:
        # Log the traceback before collapsing to a 500 (it was previously
        # lost), and chain the cause for upstream error handlers.
        logger.exception("Recommendation pipeline failed")
        raise HTTPException(status_code=500, detail=str(e)) from e
--------------------------------------------------------------------------------
/app/static/js/monaco_editor.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Monaco Editor Integration
3 | */
4 |
5 | let workspaceMonacoEditor;
6 |
/**
 * Create the Monaco editor inside #workspace-monaco-editor, sync its content
 * into the workspace state on every change, and wire up the copy button.
 */
function initializeWorkspaceEditor() {
    // Load Monaco from the CDN via its AMD loader.
    require.config({ paths: { vs: 'https://unpkg.com/monaco-editor@0.44.0/min/vs' } });

    require(['vs/editor/editor.main'], function () {
        workspaceMonacoEditor = monaco.editor.create(document.getElementById('workspace-monaco-editor'), {
            value: '',
            language: 'markdown',
            theme: 'vs',
            fontSize: 12,
            lineNumbers: 'on',
            minimap: { enabled: false },
            scrollBeyondLastLine: false,
            automaticLayout: true,
            wordWrap: 'on'
        });

        // Listen for content changes and update workspace state
        workspaceMonacoEditor.onDidChangeModelContent(function() {
            const state = window.workspaceManager?.getState();
            if (state && state.selectedFile) {
                // Update the file content in workspace state
                state.files[state.selectedFile] = workspaceMonacoEditor.getValue();
                // Save to localStorage
                if (window.workspaceManager) {
                    window.workspaceManager.saveState(window.workspaceManager.currentContextId);
                }
                // Dispatch event for auto-share
                window.dispatchEvent(new CustomEvent('workspace-content-changed'));
            }
        });

        // Expose globally for access from other components (replaces the
        // undefined value exported at module load time).
        window.workspaceMonacoEditor = workspaceMonacoEditor;

        // Copy functionality: copy the full editor content to the clipboard.
        document.getElementById('copy-workspace-editor').addEventListener('click', function() {
            const content = workspaceMonacoEditor.getValue();
            navigator.clipboard.writeText(content);
        });

        // Initialize QuickAction button handlers after editor is ready.
        // The 100 ms delay presumably lets other components register first — confirm.
        setTimeout(initializeQuickActionHandlers, 100);
    });
}
51 |
/**
 * Insert `text` at the current cursor position of the workspace editor and
 * place the cursor immediately after the inserted text.
 * No-op when the editor has not been initialized yet.
 */
function insertTextAtCursor(text) {
    if (!workspaceMonacoEditor) return;

    const selection = workspaceMonacoEditor.getSelection();
    const position = selection.getStartPosition();

    workspaceMonacoEditor.executeEdits('quickaction-insert', [{
        range: new monaco.Range(position.lineNumber, position.column, position.lineNumber, position.column),
        text: text
    }]);

    // BUG FIX: the previous cursor math (column + text.length) assumed
    // single-line text; for text containing newlines the end of the insert
    // is on a later line. Compute the true end position instead.
    const insertedLines = text.split('\n');
    const lastLine = insertedLines[insertedLines.length - 1];
    const endLineNumber = position.lineNumber + insertedLines.length - 1;
    const endColumn = insertedLines.length === 1
        ? position.column + text.length
        : lastLine.length + 1;
    workspaceMonacoEditor.setPosition(new monaco.Position(endLineNumber, endColumn));
    workspaceMonacoEditor.focus();
}
69 |
function initializeQuickActionHandlers() {
    // Intentionally empty: the QuickAction wiring was removed, but the
    // function is kept because initializeWorkspaceEditor still schedules it.
}
74 |
function updateWorkspaceEditor(tabId) {
    // Deprecated no-op: content is now tracked per-file through the workspace
    // state rather than per-tab. Kept so existing callers don't break.
    return;
}
80 |
// Export functions for global use.
// NOTE(review): at this point workspaceMonacoEditor is still undefined; the
// real instance is re-exported inside initializeWorkspaceEditor's callback,
// so this first line only reserves the global name.
window.workspaceMonacoEditor = workspaceMonacoEditor;
window.initializeWorkspaceEditor = initializeWorkspaceEditor;
window.insertTextAtCursor = insertTextAtCursor;
window.initializeQuickActionHandlers = initializeQuickActionHandlers;
window.updateWorkspaceEditor = updateWorkspaceEditor;
--------------------------------------------------------------------------------
/app/actions/agents.yaml:
--------------------------------------------------------------------------------
1 | agents:
2 | - display_name: Researcher
3 | slug: researcher
4 | content: |
5 | ---
6 | name: researcher
7 | description: Use this agent for comprehensive codebase analysis, forensic examination, and detailed code mapping with optional Chain of Draft (CoD) methodology. Excels at locating specific functions, classes, and logic, security vulnerability analysis, pattern detection, architectural consistency verification, and creating navigable code reference documentation with exact line numbers.
8 | model: sonnet
9 | color: purple
10 | ---
11 |
12 | You are an elite code search and analysis specialist with deep expertise in navigating complex codebases efficiently. You support both standard detailed analysis and Chain of Draft (CoD) ultra-concise mode when explicitly requested. Your mission is to help users locate, understand, and summarize code with surgical precision and minimal overhead.
13 |
14 |
  - display_name: Memory
16 | slug: memory
17 | content: |
18 | ---
19 | name: memory
20 | description: Use this agent proactively to synchronize memory bank documentation with actual codebase state, ensuring architectural patterns in memory files match implementation reality, updating technical decisions to reflect current code, aligning documentation with actual patterns, maintaining consistency between memory bank system and source code, and keeping all CLAUDE-*.md files accurately reflecting the current system state.
21 | color: cyan
22 | ---
23 |
24 | You are a Memory Bank Synchronization Specialist focused on maintaining consistency between CLAUDE.md and CLAUDE-*.md documentation files and actual codebase implementation. Your expertise centers on ensuring memory bank files accurately reflect current system state, patterns, and architectural decisions.
25 |
26 | Your primary responsibilities:
27 |
28 | 1. **Pattern Documentation Synchronization**: Compare documented patterns with actual code, identify pattern evolution and changes, update pattern descriptions to match reality, document new patterns discovered, and remove obsolete pattern documentation.
29 |
30 | 2. **Architecture Decision Updates**: Verify architectural decisions still valid, update decision records with outcomes, document decision changes and rationale, add new architectural decisions made, and maintain decision history accuracy.
31 |
32 | 3. **Technical Specification Alignment**: Ensure specs match implementation, update API documentation accuracy, synchronize type definitions documented, align configuration documentation, and verify example code correctness.
33 |
34 | 4. **Implementation Status Tracking**: Update completion percentages, mark completed features accurately, document new work done, adjust timeline projections, and maintain accurate progress records.
35 |
36 | 5. **Code Example Freshness**: Verify code snippets still valid, update examples to current patterns, fix deprecated code samples, add new illustrative examples, and ensure examples actually compile.
37 |
38 | 6. **Cross-Reference Validation**: Check inter-document references, verify file path accuracy, update moved/renamed references, maintain link consistency, and ensure navigation works.
39 |
40 | Your synchronization methodology:
41 |
42 | - **Systematic Comparison**: Check each claim against code
43 | - **Version Control Analysis**: Review recent changes
44 | - **Pattern Detection**: Identify undocumented patterns
45 | - **Accuracy Priority**: Correct over complete
46 | - **Practical Focus**: Keep actionable and relevant
47 |
48 | When synchronizing:
49 |
50 | 1. **Audit current state** - Review all memory bank files
51 | 2. **Compare with code** - Verify against implementation
52 | 3. **Identify gaps** - Find undocumented changes
53 | 4. **Update systematically** - Correct file by file
54 | 5. **Validate accuracy** - Ensure updates are correct
55 |
56 | Provide synchronization results with:
57 |
58 | - Files updated
59 | - Patterns synchronized
60 | - Decisions documented
61 | - Examples refreshed
62 | - Accuracy improvements
63 |
64 | Your goal is to ensure the memory bank system remains an accurate, trustworthy source of project knowledge that reflects actual implementation reality. Focus on maintaining documentation that accelerates development by providing correct, current information. Ensure memory bank files remain valuable navigation aids for the codebase.
--------------------------------------------------------------------------------
/consolidate_actions.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | """Consolidate all action files into single YAML files per category"""
3 |
4 | import yaml
5 | from pathlib import Path
6 | import json
7 |
def _collect_category(category_dir: Path) -> list:
    """Collect all actions in a category directory into a list of dicts.

    Per-file YAML versions are read first; .md files without a YAML sibling
    are wrapped into the same dict shape from their raw content.
    """
    data = []

    # Read all per-file YAML definitions
    for yaml_file in sorted(category_dir.glob("*.yaml")):
        with open(yaml_file, 'r', encoding='utf-8') as f:
            data.append(yaml.safe_load(f))

    # Pick up MD files that were never converted to YAML
    for md_file in sorted(category_dir.glob("*.md")):
        if not (category_dir / f"{md_file.stem}.yaml").exists():
            with open(md_file, 'r', encoding='utf-8') as f:
                content = f.read()
            slug = md_file.stem
            data.append({
                'display_name': slug.replace('-', ' ').title(),
                'slug': slug,
                'content': content
            })
    return data

def _write_consolidated(category: str, data: list) -> None:
    """Write the consolidated list to app/actions/<category>.yaml."""
    output_file = Path(__file__).parent / "app" / "actions" / f"{category}.yaml"
    with open(output_file, 'w', encoding='utf-8') as f:
        yaml.dump({category: data}, f, default_flow_style=False,
                  allow_unicode=True, sort_keys=False)

def consolidate_agents():
    """Consolidate all agent YAML files into a single agents.yaml"""
    agents_dir = Path(__file__).parent / "app" / "actions" / "agents"
    agents_data = _collect_category(agents_dir)
    _write_consolidated("agents", agents_data)
    print(f"Consolidated {len(agents_data)} agents into agents.yaml")
    return len(agents_data)

def consolidate_rules():
    """Consolidate all rule YAML files into a single rules.yaml"""
    rules_dir = Path(__file__).parent / "app" / "actions" / "rules"
    rules_data = _collect_category(rules_dir)
    _write_consolidated("rules", rules_data)
    print(f"Consolidated {len(rules_data)} rules into rules.yaml")
    return len(rules_data)
75 |
def consolidate_mcps():
    """Convert mcps.json to mcps.yaml with consistent structure.

    Returns:
        Number of MCP entries written, or 0 when mcps.json does not exist.
    """
    mcps_file = Path(__file__).parent / "app" / "actions" / "mcps.json"

    # Guard clause: nothing to do when the source file is absent.
    if not mcps_file.exists():
        return 0

    with open(mcps_file, 'r', encoding='utf-8') as f:
        mcps_json = json.load(f)

    # Transform the {name: config} mapping into a list with display metadata
    # so all three categories share the same shape.
    mcps_data = []
    for name, config in mcps_json.items():
        mcps_data.append({
            'display_name': name.replace('-', ' ').title(),
            'slug': name,
            'config': config
        })

    # Write consolidated file
    output_file = Path(__file__).parent / "app" / "actions" / "mcps.yaml"
    with open(output_file, 'w', encoding='utf-8') as f:
        yaml.dump({'mcps': mcps_data}, f, default_flow_style=False,
                  allow_unicode=True, sort_keys=False)

    print(f"Consolidated {len(mcps_data)} MCPs into mcps.yaml")
    return len(mcps_data)
102 |
def main():
    """Consolidate all actions into category files"""
    print("Starting consolidation...")

    # Run every category consolidation, collecting counts for the summary.
    agents_count = consolidate_agents()
    rules_count = consolidate_rules()
    mcps_count = consolidate_mcps()

    print(f"\nConsolidation complete!")
    print(f"Total: {agents_count} agents, {rules_count} rules, {mcps_count} MCPs")
    print("\nCreated files:")
    for created in ("agents", "rules", "mcps"):
        print(f"  - app/actions/{created}.yaml")
    print("\nNote: Original files have been preserved.")
    print("You can delete the individual files once the new system is verified.")

if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
/app/templates/components/ActionButton.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
--------------------------------------------------------------------------------
/app/templates/components/workspace_editor.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
Context editor
7 |
8 |
12 |
13 |
14 |
15 |
16 |
17 |
18 | No file selected
19 |
20 |
21 |
22 |
28 |
29 |
30 |
31 |
32 |
33 |
36 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
48 |
49 |
50 |
51 |
52 |
53 |
54 |
71 |
72 |
73 |
--------------------------------------------------------------------------------
/app/templates/components/context_modal.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
Create new context
7 |
8 |
9 |
10 |
11 |
18 |
19 |
20 |
21 |
22 |
23 |
28 |
33 |
34 |
35 |
36 |
37 |
38 |
--------------------------------------------------------------------------------
/app/templates/components/workspace_files.html:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[codz]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 | # Distribution / packaging
9 | .Python
10 | build/
11 | develop-eggs/
12 | dist/
13 | downloads/
14 | eggs/
15 | .eggs/
16 | lib/
17 | lib64/
18 | parts/
19 | sdist/
20 | var/
21 | wheels/
22 | share/python-wheels/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 | MANIFEST
27 |
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 |
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 |
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .nox/
42 | .coverage
43 | .coverage.*
44 | .cache
45 | nosetests.xml
46 | coverage.xml
47 | *.cover
48 | *.py.cover
49 | .hypothesis/
50 | .pytest_cache/
51 | cover/
52 |
53 | # Translations
54 | *.mo
55 | *.pot
56 |
57 | # Django stuff:
58 | *.log
59 | local_settings.py
60 | db.sqlite3
61 | db.sqlite3-journal
62 |
63 | # Flask stuff:
64 | instance/
65 | .webassets-cache
66 |
67 | # Scrapy stuff:
68 | .scrapy
69 |
70 | # Sphinx documentation
71 | docs/_build/
72 |
73 | # PyBuilder
74 | .pybuilder/
75 | target/
76 |
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 |
80 | # IPython
81 | profile_default/
82 | ipython_config.py
83 |
84 | # pyenv
85 | # For a library or package, you might want to ignore these files since the code is
86 | # intended to run in multiple environments; otherwise, check them in:
87 | # .python-version
88 |
89 | # pipenv
90 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
91 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
92 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
93 | # install all needed dependencies.
94 | #Pipfile.lock
95 |
96 | # UV
97 | # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
98 | # This is especially recommended for binary packages to ensure reproducibility, and is more
99 | # commonly ignored for libraries.
100 | #uv.lock
101 |
102 | # poetry
103 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
104 | # This is especially recommended for binary packages to ensure reproducibility, and is more
105 | # commonly ignored for libraries.
106 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
107 | #poetry.lock
108 | #poetry.toml
109 |
110 | # pdm
111 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
112 | # pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python.
113 | # https://pdm-project.org/en/latest/usage/project/#working-with-version-control
114 | #pdm.lock
115 | #pdm.toml
116 | .pdm-python
117 | .pdm-build/
118 |
119 | # pixi
120 | # Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
121 | #pixi.lock
122 | # Pixi creates a virtual environment in the .pixi directory, just like venv module creates one
123 | # in the .venv directory. It is recommended not to include this directory in version control.
124 | .pixi
125 |
126 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
127 | __pypackages__/
128 |
129 | # Celery stuff
130 | celerybeat-schedule
131 | celerybeat.pid
132 |
133 | # SageMath parsed files
134 | *.sage.py
135 |
136 | # Environments
137 | .env
138 | .envrc
139 | .venv
140 | env/
141 | venv/
142 | ENV/
143 | env.bak/
144 | venv.bak/
145 |
146 | # Spyder project settings
147 | .spyderproject
148 | .spyproject
149 |
150 | # Rope project settings
151 | .ropeproject
152 |
153 | # mkdocs documentation
154 | /site
155 |
156 | # mypy
157 | .mypy_cache/
158 | .dmypy.json
159 | dmypy.json
160 |
161 | # Pyre type checker
162 | .pyre/
163 |
164 | # pytype static type analyzer
165 | .pytype/
166 |
167 | # Cython debug symbols
168 | cython_debug/
169 |
170 | # PyCharm
171 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
172 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
173 | # and can be added to the global gitignore or merged into this file. For a more nuclear
174 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
175 | #.idea/
176 |
177 | # Abstra
178 | # Abstra is an AI-powered process automation framework.
179 | # Ignore directories containing user credentials, local state, and settings.
180 | # Learn more at https://abstra.io/docs
181 | .abstra/
182 |
183 | # Visual Studio Code
184 | # Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
185 | # that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
186 | # and can be added to the global gitignore or merged into this file. However, if you prefer,
187 | # you could uncomment the following to ignore the entire vscode folder
188 | # .vscode/
189 |
190 | # Ruff stuff:
191 | .ruff_cache/
192 |
193 | # PyPI configuration file
194 | .pypirc
195 |
196 | # Cursor
197 | # Cursor is an AI-powered code editor. `.cursorignore` specifies files/directories to
198 | # exclude from AI features like autocomplete and code analysis. Recommended for sensitive data
199 | # refer to https://docs.cursor.com/context/ignore-files
200 | .cursorignore
201 | .cursorindexingignore
202 |
203 | # Marimo
204 | marimo/_static/
205 | marimo/_lsp/
206 | __marimo__/
207 |
208 | .playwright-mcp
209 | .mcp.json
210 |
211 | TASK.md
212 | TASK2.md
213 | TASK_IN_PROGRESS.md
214 | TASK_DONE.md
215 |
--------------------------------------------------------------------------------
/app/routes/generate.py:
--------------------------------------------------------------------------------
1 | from fastapi import APIRouter, HTTPException
2 | from pydantic import BaseModel
3 | from typing import List, Dict, Any, Optional
4 | import json
5 | from app.services.actions_loader import actions_loader
6 |
7 | router = APIRouter(prefix="/api", tags=["generate"])
8 |
class GenerateRequest(BaseModel):
    """Request payload for POST /api/generate."""

    action_ids: List[str]           # slugs of the selected agents/rules/MCPs
    formats: List[str] = ["claude"]  # claude, cursor, agents
    source: str = "scratch"  # "repo", "template", or "scratch"
    repo_url: Optional[str] = None  # For tracking the source repo when source="repo"
14 |
class GenerateResponse(BaseModel):
    """Generated files plus a unified patch bundling them."""

    files: Dict[str, str]  # relative file path -> file content
    patch: str             # unified diff that creates every file
    source: str            # echoes the request's source field
19 |
@router.post("/generate", operation_id="generate_configuration")
async def generate_configuration(request: GenerateRequest) -> GenerateResponse:
    """Generate configuration files from selected action IDs.

    Each id is resolved as an agent, rule, or MCP (first match wins;
    unknown ids are ignored). The requested formats are rendered, plus an
    .mcp.json when MCPs were selected, and a unified patch covering all files.
    """

    files = {}

    # Resolve each requested id to its action definition
    selected_agents = []
    selected_rules = []
    selected_mcps = []

    for action_id in request.action_ids:
        # Check agents first
        agent = actions_loader.get_agent(action_id)
        if agent:
            selected_agents.append(agent)
            continue

        # Then rules
        rule = actions_loader.get_rule(action_id)
        if rule:
            selected_rules.append(rule)
            continue

        # Finally MCPs; ids matching nothing are silently skipped
        mcp = actions_loader.get_mcp(action_id)
        if mcp:
            selected_mcps.append(mcp)
            continue

    # The claude/cursor/agents formats all embed the same concatenated
    # rule text, so build it once.
    rules_text = "\n\n".join(
        rule['content'].strip() for rule in selected_rules if rule.get('content')
    )

    # Generate files based on selected formats
    for format_type in request.formats:
        if format_type == "claude":
            if rules_text:
                files['CLAUDE.md'] = rules_text

            # One file per agent under .claude/agents/.
            for agent in selected_agents:
                if agent.get('content'):
                    # BUG FIX: the computed filename was previously unused and
                    # every agent overwrote the same hard-coded dict key.
                    filename = agent.get('filename', f"{agent['name']}.md")
                    files[f".claude/agents/{filename}"] = agent['content']

        elif format_type == "cursor":
            # .cursorrules mirrors the concatenated rule content
            if rules_text:
                files['.cursorrules'] = rules_text

        elif format_type == "agents":
            # AGENTS.md mirrors CLAUDE.md's rule content
            if rules_text:
                files['AGENTS.md'] = rules_text

    # Generate .mcp.json if there are MCPs
    if selected_mcps:
        mcp_config = {"mcpServers": {}}
        for mcp in selected_mcps:
            if mcp.get('config'):
                mcp_config["mcpServers"][mcp['name']] = mcp['config']

        if mcp_config["mcpServers"]:
            files['.mcp.json'] = json.dumps(mcp_config, indent=2)

    # Generate patch file covering everything produced above
    patch = generate_patch(files, request.source, request.repo_url)

    return GenerateResponse(files=files, patch=patch, source=request.source)
107 |
def generate_patch(files: Dict[str, str], source: str = "scratch", repo_url: Optional[str] = None) -> str:
    """
    Generate a unified diff patch from the files.

    Args:
        files: Dictionary of file paths and their contents
        source: Source of the generation ("repo", "template", or "scratch")
        repo_url: URL of source repository if source is "repo"

    Returns:
        Unified diff patch string that can be applied with patch command
    """
    patch_lines = []

    # Comment header explaining where the patch came from; the usage line is
    # identical for every source, so it is appended once below.
    if source == "repo" and repo_url:
        patch_lines.append(f"# Gitrules configuration patch generated from repository: {repo_url}")
    elif source == "template":
        patch_lines.append("# Gitrules configuration patch generated from template")
    else:
        patch_lines.append("# Gitrules configuration patch generated from scratch")
    patch_lines.append("# Apply with: patch -p0 < ")

    patch_lines.append("")

    for filepath, content in files.items():
        # Each file is emitted as a new-file hunk (/dev/null -> filepath)
        patch_lines.append("--- /dev/null")
        patch_lines.append(f"+++ {filepath}")

        lines = content.split('\n')
        if lines and lines[-1] == '':
            lines.pop()  # drop the empty element a trailing newline produces

        patch_lines.append(f"@@ -0,0 +1,{len(lines)} @@")
        patch_lines.extend(f"+{line}" for line in lines)

        patch_lines.append("")  # Empty line between files

    return '\n'.join(patch_lines)
--------------------------------------------------------------------------------
/app/templates/components/file_modal.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
Create file
7 |
8 |
9 |
10 |
11 |
18 |
19 |
20 |
21 |
22 |
23 |
28 |
33 |
34 |
35 |
36 |
37 |
38 |
--------------------------------------------------------------------------------
/app/services/search_service.py:
--------------------------------------------------------------------------------
1 | from typing import List, Optional, Dict, Any
2 | from fuzzywuzzy import fuzz
3 | from app.models.actions import Agent, Rule, MCP
4 | from app.services.actions_loader import actions_loader
5 | import re
6 | import fnmatch
7 |
class SearchService:
    """Fuzzy and wildcard search over the loaded agents, rules and MCPs."""

    def __init__(self):
        # Share the module-level singleton so every search sees one catalog.
        self.actions_loader = actions_loader

    def _is_wildcard_query(self, query: str) -> bool:
        """Return True when the query uses shell-style wildcards (* or ?)."""
        return any(ch in query for ch in ('*', '?'))

    def _wildcard_match(self, pattern: str, text: str) -> bool:
        """Case-insensitive shell-style wildcard match."""
        return fnmatch.fnmatch(text.lower(), pattern.lower())

    def _calculate_relevance(self, query: str, text: str) -> int:
        """Score how well *text* matches *query* on a 0-100 scale.

        Wildcard queries are binary (95 on match, 0 otherwise); otherwise
        exact match scores 100, substring 90, and anything else falls back
        to fuzzy partial-ratio matching.
        """
        if not text:
            return 0
        query_folded = query.lower()
        text_folded = text.lower()

        if self._is_wildcard_query(query):
            return 95 if self._wildcard_match(query, text) else 0

        if query_folded == text_folded:
            return 100
        if query_folded in text_folded:
            return 90
        return fuzz.partial_ratio(query_folded, text_folded)

    def _ranked(self, scored_items, result_key: str, drop_field: str, limit: int) -> List[Dict[str, Any]]:
        """Filter scored (item, score) pairs by the relevance threshold,
        strip the bulky field from each payload, and return the top matches
        sorted by descending relevance."""
        hits = []
        for item, score in scored_items:
            if score > 30:  # relevance threshold
                payload = item.dict()
                payload.pop(drop_field, None)  # keep search results lightweight
                hits.append({result_key: payload, "relevance": score})
        hits.sort(key=lambda entry: entry["relevance"], reverse=True)
        return hits[:limit]

    def search_agents(self, query: str, limit: int = 10) -> List[Dict[str, Any]]:
        """Search for agents by name, display_name, or content."""
        scored = []
        for agent in self.actions_loader.get_agents():
            score = max(
                self._calculate_relevance(query, agent.name),
                self._calculate_relevance(query, agent.display_name or ""),
                # Content matches are down-weighted relative to names.
                self._calculate_relevance(query, agent.content or "") * 0.5,
            )
            scored.append((agent, score))
        return self._ranked(scored, "agent", "content", limit)

    def search_rules(self, query: str, limit: int = 10) -> List[Dict[str, Any]]:
        """Search for rules by name, display_name, content, tags, or author."""
        scored = []
        for rule in self.actions_loader.get_rules():
            candidates = [
                self._calculate_relevance(query, rule.name),
                self._calculate_relevance(query, rule.display_name or ""),
                self._calculate_relevance(query, rule.content or "") * 0.5,
                self._calculate_relevance(query, rule.author or "") * 0.7,
            ]
            # Tags score at full weight.
            candidates.extend(
                self._calculate_relevance(query, tag) for tag in (rule.tags or [])
            )
            scored.append((rule, max(candidates)))
        return self._ranked(scored, "rule", "content", limit)

    def search_mcps(self, query: str, limit: int = 10) -> List[Dict[str, Any]]:
        """Search for MCPs by name or config content."""
        scored = []
        for mcp in self.actions_loader.get_mcps():
            score = max(
                self._calculate_relevance(query, mcp.name),
                # Config is searched as its string representation, down-weighted.
                self._calculate_relevance(query, str(mcp.config)) * 0.5,
            )
            scored.append((mcp, score))
        return self._ranked(scored, "mcp", "config", limit)

    def search_all(self, query: str, limit: int = 10) -> Dict[str, Any]:
        """Search across all types (agents, rules, MCPs) with one query."""
        return {
            "agents": self.search_agents(query, limit),
            "rules": self.search_rules(query, limit),
            "mcps": self.search_mcps(query, limit),
        }
139 |
# Create singleton instance at import time; all importers share this object
# (and therefore one actions_loader catalog).
search_service = SearchService()
--------------------------------------------------------------------------------
/app/services/smart_ingest.py:
--------------------------------------------------------------------------------
1 | """
2 | Functions for ingesting repositories and sending context to OpenAI API.
3 | """
4 |
5 | import httpx
6 | from typing import Optional, Dict, Any
7 | from dotenv import load_dotenv
8 | import os
9 | from loguru import logger
10 |
11 | # Load environment variables from .env file
12 | load_dotenv()
13 |
14 |
async def use_gitingest(url: str, context_size: int = 50000) -> str:
    """
    Ingest a repository using gitingest.com API and trim to specified token size.

    Args:
        url: Repository URL to ingest
        context_size: Maximum context size in tokens (default ~50k tokens)

    Returns:
        String containing the repository context, trimmed to specified size

    Raises:
        Exception: If the gitingest.com API call fails (chained from the
            underlying httpx error).
    """
    logger.info(f"Ingesting repository from {url}")
    # Query gitingest.com API instead of a local package
    async with httpx.AsyncClient(timeout=120.0) as client:
        try:
            response = await client.post(
                "https://gitingest.com/api/ingest",
                json={
                    "input_text": url,
                    "max_file_size": 102400,
                    "pattern_type": "exclude",
                    "pattern": "",
                    "token": ""
                },
                headers={
                    "Content-Type": "application/json"
                }
            )
            response.raise_for_status()

            data = response.json()
            # Defensive: the API is expected to return a JSON object; a
            # non-dict payload previously crashed on data.get below.
            if not isinstance(data, dict):
                data = {}
            full_context = data.get("content", "")

            # If the API returns structured data, combine all sections.
            if "summary" in data:
                summary = data.get("summary", "")
                tree = data.get("tree", "")
                content = data.get("content", "")
                full_context = f"{summary}\n\n{tree}\n\n{content}"

        except httpx.HTTPError as e:
            logger.error(f"Failed to ingest repository from gitingest.com: {str(e)}")
            # Chain the original httpx error so callers can inspect the cause.
            raise Exception(f"Failed to ingest repository from gitingest.com: {str(e)}") from e

    # Approximate token count (roughly 4 chars per token) and trim to the
    # requested context size.
    max_chars = context_size * 4
    original_length = len(full_context)
    if original_length > max_chars:
        full_context = full_context[:max_chars]
        # Add ellipsis to indicate truncation
        full_context += "\n\n... (context truncated)"
        logger.info(f"Context truncated from {original_length} to {len(full_context)} characters")
    else:
        logger.info(f"Repository context ingested: {len(full_context)} characters")

    return full_context
74 |
75 |
def smart_ingest(
    context: str,
    user_prompt: str = "Analyze this repository and provide insights",
    api_key: Optional[str] = None
) -> Dict[str, Any]:
    """
    Send the ingested repository context to the OpenAI API with a system prompt.

    Args:
        context: The "big fat context" produced by the ingestion step
        user_prompt: The user's question or request about the repository
        api_key: Optional OpenAI API key (defaults to env var OPENAI_API_KEY)

    Returns:
        Dictionary containing OpenAI's response text and call metadata

    Raises:
        ValueError: If no API key is provided and none is in the environment.
        Exception: If the API call fails.
    """
    # Resolve the API key, falling back to the environment.
    resolved_key = api_key or os.getenv("OPENAI_API_KEY")
    if not resolved_key:
        raise ValueError("OPENAI_API_KEY not found in environment variables")

    # System prompt steering the model toward repository analysis.
    system_prompt = """You are an expert code analyst and software architect.
You have been given the complete context of a repository including its structure and file contents.
Analyze the repository thoroughly and provide insights based on the user's request.
Focus on:
- Code quality and architecture
- Potential improvements
- Security considerations
- Documentation completeness
- Dependencies and technical debt
Be specific and provide actionable recommendations."""

    # Full request body for the chat completions endpoint.
    request_body = {
        "model": "gpt-4o-mini",  # Using GPT-4o-mini for cost efficiency
        "messages": [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": f"{user_prompt}\n\n{context}"},
        ],
        "temperature": 0.3,  # Lower temperature for more focused analysis
        "max_tokens": 4096,
    }

    try:
        with httpx.Client(timeout=60.0) as client:
            response = client.post(
                "https://api.openai.com/v1/chat/completions",
                json=request_body,
                headers={
                    "Authorization": f"Bearer {resolved_key}",
                    "Content-Type": "application/json",
                },
            )
            response.raise_for_status()

        payload = response.json()
        first_choice = payload.get("choices", [{}])[0]
        message = first_choice.get("message", {})

        return {
            "success": True,
            "response": message.get("content", ""),
            "model": payload.get("model"),
            "usage": payload.get("usage", {}),
            "finish_reason": first_choice.get("finish_reason"),
        }

    except httpx.HTTPStatusError as e:
        error_detail = e.response.text if e.response else str(e)
        logger.error(f"OpenAI API error: {e.response.status_code} - {error_detail}")
        raise Exception(f"OpenAI API error: {e.response.status_code} - {error_detail}")
    except Exception as e:
        logger.error(f"Failed to send context to OpenAI: {str(e)}")
        raise Exception(f"Failed to send context to OpenAI: {str(e)}")
--------------------------------------------------------------------------------
/app/templates/components/styles.html:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/app/services/recommend_tools.py:
--------------------------------------------------------------------------------
1 | """
2 | Service for recommending tools based on repository context.
3 | """
4 |
5 | import json
6 | import hashlib
7 | from typing import Dict, List, Tuple, Optional, Any
8 | from app.services.actions_loader import actions_loader
9 | import httpx
10 | from dotenv import load_dotenv
11 | import os
12 |
13 | # Load environment variables
14 | load_dotenv()
15 |
16 |
def build_tools_catalog() -> Dict[str, List[Dict[str, Any]]]:
    """
    Build a minimal catalog of available tools from actions_loader.

    Returns:
        Dictionary with three lists: agents, rules, mcps.
        Each item has: slug, display_name, tags (optional), type (for rules).
    """
    agents = [
        {
            "slug": agent.slug or agent.name,
            "display_name": agent.display_name or agent.name,
            "tags": getattr(agent, 'tags', []) or [],
        }
        for agent in actions_loader.get_agents()
    ]

    rules = [
        {
            "slug": rule.slug or rule.name,
            "display_name": rule.display_name or rule.name,
            "type": rule.type,  # 'rule' or 'ruleset'
            "tags": getattr(rule, 'tags', []) or [],
        }
        for rule in actions_loader.get_rules()
    ]

    # MCPs use 'name' as their identifier and carry no tags in the
    # current data structure.
    mcps = [
        {"slug": mcp.name, "display_name": mcp.name, "tags": []}
        for mcp in actions_loader.get_mcps()
    ]

    catalog = {"agents": agents, "rules": rules, "mcps": mcps}

    # Sort every section by slug so downstream hashing/prompting is stable.
    for entries in catalog.values():
        entries.sort(key=lambda entry: entry["slug"])

    return catalog
62 |
63 |
def get_catalog_version(catalog: Dict[str, List[Dict[str, Any]]]) -> str:
    """
    Calculate a stable hash of the catalog slugs.

    Args:
        catalog: The tools catalog

    Returns:
        First 8 hex characters of the SHA1 of the sorted, comma-joined slugs
    """
    slugs = sorted(
        entry["slug"]
        for section in ("agents", "rules", "mcps")
        for entry in catalog[section]
    )
    digest = hashlib.sha1(",".join(slugs).encode())
    return digest.hexdigest()[:8]
81 |
82 |
def format_catalog_for_prompt(catalog: Dict[str, List[Dict[str, Any]]]) -> str:
    """
    Format the catalog into a compact text for the LLM prompt.

    Args:
        catalog: The tools catalog

    Returns:
        Multi-line string: a header per section, one indented line per tool
    """
    def tag_suffix(entry: Dict[str, Any]) -> str:
        # Render the tag list only when the entry actually has tags.
        return f" — [{', '.join(entry['tags'])}]" if entry.get('tags') else ""

    lines = ["- Agents:"]
    lines += [
        f"  {agent['slug']} — {agent['display_name']}{tag_suffix(agent)}"
        for agent in catalog["agents"]
    ]

    lines.append("- Rules:")
    lines += [
        f"  {rule['slug']} — {rule['display_name']} — {rule['type']}{tag_suffix(rule)}"
        for rule in catalog["rules"]
    ]

    lines.append("- MCPs:")
    lines += [
        f"  {mcp['slug']} — {mcp['display_name']}{tag_suffix(mcp)}"
        for mcp in catalog["mcps"]
    ]

    return "\n".join(lines)
114 |
115 |
def call_llm_for_reco(context: str, catalog_text: str, user_prompt: str = "", api_key: Optional[str] = None) -> str:
    """
    Call the LLM to get tool recommendations.

    Args:
        context: Repository context (summary + tree + content)
        catalog_text: Formatted catalog of available tools
        user_prompt: Optional user guidance, appended as a "focus" hint
        api_key: Optional OpenAI API key (defaults to env var OPENAI_API_KEY)

    Returns:
        Raw LLM response string (expected to be JSON; validated by the caller)

    Raises:
        ValueError: If no API key is available.
        Exception: If the HTTP call or response parsing fails (chained from
            the underlying error).
    """
    # Resolve the API key, falling back to the environment.
    if not api_key:
        api_key = os.getenv("OPENAI_API_KEY")
        if not api_key:
            raise ValueError("OPENAI_API_KEY not found")

    # System prompt; exact wording enforces the JSON-only output contract.
    system_prompt = """You are "Tool Recommender for Codebases." Your job is to read a repository context and choose a minimal set of helpful tools (rules, agents, MCPs) from the provided catalog.

Hard requirements:
- Output strictly valid JSON. No markdown, no commentary.
- Use only the slugs present in the catalog below.
- Prefer minimal selections: 0–2 per category (maximum 3).
- If unsure, return empty arrays.

Selection guidelines:
- Pick items that improve correctness, safety, or developer workflow for this codebase.
- Avoid redundant overlap (e.g., don't pick both a ruleset and all its child rules).
- Skip "fun/novelty" items unless clearly beneficial.
- Base the decision solely on the given repository context and the catalog.

Catalog (one line per item, slug first):
""" + catalog_text + """

Return JSON with this exact shape:
- rules: array of slugs
- agents: array of slugs
- mcps: array of slugs
- rationales (optional): object whose keys are "rules:", "agents:", "mcps:" and whose values are short one-line reasons.

You will now receive the repository context (summary, tree, truncated content) and an optional user focus. Choose minimal helpful tools from the catalog and return JSON only."""

    # User message: the (truncated) repo context plus an optional focus hint.
    user_message = "Here is the codebase context (truncated). Choose minimal useful tools from the catalog above.\n\n"
    user_message += context
    if user_prompt:
        user_message += f"\n\nUser focus: {user_prompt}"

    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_message}
    ]

    url = "https://api.openai.com/v1/chat/completions"
    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json"
    }

    data = {
        "model": "gpt-4o-mini",
        "messages": messages,
        "temperature": 0.2,  # Low temperature for consistency
        "max_tokens": 1000
    }

    try:
        with httpx.Client(timeout=60.0) as client:
            response = client.post(url, json=data, headers=headers)
            response.raise_for_status()
            result = response.json()

            # Extract the assistant message content.
            content = result["choices"][0]["message"]["content"]
            return content

    except Exception as e:
        # Chain the underlying error so callers can inspect the real cause.
        raise Exception(f"LLM call failed: {str(e)}") from e
198 |
199 |
def parse_and_validate(llm_raw: str, catalog: Dict[str, List[Dict[str, Any]]]) -> Tuple[Dict[str, List[str]], Optional[Dict[str, str]]]:
    """
    Parse and validate the LLM response against the catalog.

    Args:
        llm_raw: Raw JSON string from the LLM (possibly wrapped in markdown)
        catalog: The tools catalog for validation

    Returns:
        Tuple of (preselect dict with keys rules/agents/mcps, rationales dict
        or None). Unknown slugs are dropped, duplicates removed, and at most
        3 items per category are kept.
    """
    empty: Dict[str, List[str]] = {"rules": [], "agents": [], "mcps": []}

    # Parse the JSON payload. If the raw string is not pure JSON (e.g. the
    # model wrapped it in a markdown fence), fall back to the outermost
    # brace-delimited span. Unlike the previous flat regex (\{[^{}]*\}),
    # this handles nested objects such as "rationales".
    try:
        data = json.loads(llm_raw)
    except json.JSONDecodeError:
        start = llm_raw.find('{')
        end = llm_raw.rfind('}')
        if start == -1 or end <= start:
            return empty, None
        try:
            data = json.loads(llm_raw[start:end + 1])
        except json.JSONDecodeError:
            return empty, None

    # A non-object payload (e.g. a bare list) carries no selections.
    if not isinstance(data, dict):
        return empty, None

    # Valid slug sets per category, built from the catalog.
    valid = {
        "agents": {a["slug"] for a in catalog["agents"]},
        "rules": {r["slug"] for r in catalog["rules"]},
        "mcps": {m["slug"] for m in catalog["mcps"]},
    }

    def _clean(category: str) -> List[str]:
        # Keep original order; cap at the first 3 entries (invalid entries
        # count against the cap, matching previous behavior), then drop
        # unknown slugs and duplicates.
        selection = data.get(category)
        if not isinstance(selection, list):
            return []
        seen = set()
        kept = []
        for slug in selection[:3]:  # Max 3
            if slug in valid[category] and slug not in seen:
                kept.append(slug)
                seen.add(slug)
        return kept

    preselect = {category: _clean(category) for category in ("rules", "agents", "mcps")}

    # Extract rationales if present; only keep those for selected items.
    rationales = None
    if isinstance(data.get("rationales"), dict):
        rationales = {}
        for key, value in data["rationales"].items():
            parts = key.split(":", 1)
            if len(parts) != 2:
                continue
            category, slug = parts
            if category in preselect and slug in preselect[category]:
                rationales[key] = str(value)[:200]  # Limit length

    return preselect, rationales
--------------------------------------------------------------------------------
/app/services/actions_loader.py:
--------------------------------------------------------------------------------
1 | import yaml
2 | from typing import List, Dict, Any, Optional
3 | from pathlib import Path
4 | from app.models.actions import Agent, Rule, MCP, Pack, Action, ActionType
5 | from loguru import logger
6 |
class ActionsLoader:
    """
    Load agents, rules, MCPs and packs from the consolidated YAML files in
    app/actions.

    Everything is exposed twice: as unified ``Action`` objects in
    ``self.actions`` and as legacy per-type model lists
    (``agents``/``rules``/``mcps``/``packs``) for backward compatibility.
    A missing or malformed YAML file leaves the corresponding list empty
    instead of crashing import (previously only agents had this guard).
    """

    def __init__(self):
        # YAML source directory: app/actions (sibling of app/services).
        self.actions_dir = Path(__file__).parent.parent / "actions"
        self.actions: List[Action] = []
        # Legacy per-type lists kept for backward compatibility.
        self.agents: List[Agent] = []
        self.rules: List[Rule] = []
        self.mcps: List[MCP] = []
        self.packs: List[Pack] = []
        logger.info(f"Loading actions from {self.actions_dir}")
        self.load_all()

    def load_all(self):
        """Load all actions from consolidated YAML files."""
        self.load_agents()
        self.load_rules()
        self.load_mcps()
        self.load_packs()

    def load_agents(self):
        """Load all agents from agents.yaml into both the unified and legacy lists."""
        agents_file = self.actions_dir / "agents.yaml"
        if not agents_file.exists():
            logger.warning(f"Agents file not found: {agents_file}")
            self.agents = []
            return
        try:
            with open(agents_file, 'r') as f:
                data = yaml.safe_load(f)
            if data and 'agents' in data:
                logger.info(f"Loading {len(data['agents'])} agents")
                for agent_data in data['agents']:
                    slug = agent_data.get('slug', '')
                    # Unified Action object
                    self.actions.append(Action(
                        id=slug,
                        name=slug,
                        display_name=agent_data.get('display_name'),
                        action_type=ActionType.AGENT,
                        tags=agent_data.get('tags', []),
                        content=agent_data.get('content'),
                        filename=f"{slug}.md"
                    ))
                    # Legacy Agent for backward compatibility
                    self.agents.append(Agent(
                        name=slug,
                        filename=f"{slug}.md",
                        display_name=agent_data.get('display_name'),
                        slug=slug,
                        content=agent_data.get('content'),
                        tags=agent_data.get('tags', [])
                    ))
        except Exception as e:
            logger.error(f"Error loading agents from {agents_file}: {e}")
            self.agents = []

    def _parse_rule(self, slug: str, rule_data: Dict[str, Any]) -> Rule:
        """Parse a single rule or ruleset entry from the YAML data."""
        return Rule(
            name=slug,  # Use slug as name for backward compat
            filename=f"{slug}.yaml",  # Virtual filename
            display_name=rule_data.get('display_name'),
            slug=slug,
            content=rule_data.get('content'),
            author=rule_data.get('author'),
            tags=rule_data.get('tags'),
            type=rule_data.get('type', 'rule'),
            namespace=rule_data.get('namespace'),
            children=rule_data.get('children')  # Just a list of rule IDs
        )

    def load_rules(self):
        """Load all rules from rules.yaml (top-level keys are the slugs)."""
        rules_file = self.actions_dir / "rules.yaml"
        if not rules_file.exists():
            logger.warning(f"Rules file not found: {rules_file}")
            self.rules = []
            return
        try:
            with open(rules_file, 'r') as f:
                data = yaml.safe_load(f)
            self.rules = []
            if data:
                for slug, rule_data in data.items():
                    self.rules.append(self._parse_rule(slug, rule_data))

                    # Unified Action object; rulesets are a distinct type.
                    rule_type = ActionType.RULESET if rule_data.get('type') == 'ruleset' else ActionType.RULE
                    self.actions.append(Action(
                        id=slug,
                        name=slug,
                        display_name=rule_data.get('display_name'),
                        action_type=rule_type,
                        tags=rule_data.get('tags'),
                        content=rule_data.get('content'),
                        author=rule_data.get('author'),
                        children=rule_data.get('children'),
                        filename=f"{slug}.yaml",
                        namespace=rule_data.get('namespace')
                    ))
        except Exception as e:
            # Consistent with load_agents: bad YAML must not crash startup.
            logger.error(f"Error loading rules from {rules_file}: {e}")
            self.rules = []

    def load_mcps(self):
        """Load all MCPs from mcps.yaml into both the unified and legacy lists."""
        mcps_file = self.actions_dir / "mcps.yaml"
        if not mcps_file.exists():
            logger.warning(f"MCPs file not found: {mcps_file}")
            self.mcps = []
            return
        try:
            with open(mcps_file, 'r') as f:
                data = yaml.safe_load(f)
            if data and 'mcps' in data:
                for mcp_data in data['mcps']:
                    name = mcp_data.get('slug', '')  # MCPs use the slug as their name
                    # Unified Action object
                    self.actions.append(Action(
                        id=name,
                        name=name,
                        display_name=mcp_data.get('display_name'),
                        action_type=ActionType.MCP,
                        tags=mcp_data.get('tags', []),
                        config=mcp_data.get('config', {}),
                        description=mcp_data.get('description')
                    ))
                    # Legacy MCP for backward compatibility
                    self.mcps.append(MCP(
                        name=name,
                        config=mcp_data.get('config', {}),
                        tags=mcp_data.get('tags', []),
                        description=mcp_data.get('description')
                    ))
        except Exception as e:
            logger.error(f"Error loading MCPs from {mcps_file}: {e}")
            self.mcps = []

    def get_all(self) -> Dict[str, Any]:
        """Get all loaded actions as the legacy per-type lists."""
        return {
            "agents": self.agents,
            "rules": self.rules,
            "mcps": self.mcps
        }

    def get_agents(self) -> List[Agent]:
        """Get all agents."""
        return self.agents

    def get_rules(self) -> List[Rule]:
        """Get all rules."""
        return self.rules

    def get_mcps(self) -> List[MCP]:
        """Get all MCPs."""
        return self.mcps

    def get_agent_by_slug(self, slug: str) -> Optional[Agent]:
        """Get a specific agent by slug, or None if not found."""
        return next((a for a in self.agents if a.slug == slug), None)

    def get_rule_by_slug(self, slug: str) -> Optional[Rule]:
        """Get a specific rule by slug, or None if not found."""
        return next((r for r in self.rules if r.slug == slug), None)

    def load_packs(self):
        """Load all packs from packs.yaml into both the unified and legacy lists."""
        packs_file = self.actions_dir / "packs.yaml"
        if not packs_file.exists():
            logger.warning(f"Packs file not found: {packs_file}")
            self.packs = []
            return
        try:
            with open(packs_file, 'r') as f:
                data = yaml.safe_load(f)
            if data and 'packs' in data:
                for pack_data in data['packs']:
                    pack_id = pack_data.get('id', '')
                    # Unified Action object
                    self.actions.append(Action(
                        id=pack_id,
                        name=pack_data.get('name', ''),
                        display_name=pack_data.get('display_name'),
                        action_type=ActionType.PACK,
                        tags=pack_data.get('tags', []),
                        children=pack_data.get('actions', [])
                    ))
                    # Legacy Pack for backward compatibility
                    self.packs.append(Pack(
                        id=pack_id,
                        name=pack_data.get('name', ''),
                        display_name=pack_data.get('display_name'),
                        tags=pack_data.get('tags', []),
                        description=pack_data.get('description'),
                        actions=pack_data.get('actions', [])
                    ))
        except Exception as e:
            logger.error(f"Error loading packs from {packs_file}: {e}")
            self.packs = []

    def get_packs(self) -> List[Pack]:
        """Get all packs."""
        return self.packs

    def get_actions(self, action_type: Optional[ActionType] = None, tags: Optional[List[str]] = None,
                    limit: int = 30, offset: int = 0) -> List[Action]:
        """Get all actions with optional type/tag filtering and pagination."""
        filtered = self.actions

        # Filter by action type
        if action_type:
            filtered = [a for a in filtered if a.action_type == action_type]

        # Filter by tags (any overlap counts as a match)
        if tags:
            filtered = [a for a in filtered if a.tags and any(tag in a.tags for tag in tags)]

        # Apply pagination
        return filtered[offset:offset + limit]

    def get_action_by_id(self, action_id: str) -> Optional[Action]:
        """Get a specific action by ID, or None if not found."""
        return next((a for a in self.actions if a.id == action_id), None)

    def get_agent(self, action_id: str) -> Optional[Dict[str, Any]]:
        """Get agent data by ID for legacy compatibility (dict or None)."""
        action = self.get_action_by_id(action_id)
        if action and action.action_type == ActionType.AGENT:
            return {
                'name': action.name,
                'display_name': action.display_name,
                'slug': action.id,
                'filename': action.filename,
                'content': action.content,
                'tags': action.tags
            }
        return None

    def get_rule(self, action_id: str) -> Optional[Dict[str, Any]]:
        """Get rule data by ID for legacy compatibility (dict or None)."""
        action = self.get_action_by_id(action_id)
        if action and action.action_type in [ActionType.RULE, ActionType.RULESET]:
            return {
                'name': action.name,
                'display_name': action.display_name,
                'slug': action.id,
                'content': action.content,
                'tags': action.tags,
                'type': action.action_type.value.lower()
            }
        return None

    def get_mcp(self, action_id: str) -> Optional[Dict[str, Any]]:
        """Get MCP data by ID for legacy compatibility (dict or None)."""
        action = self.get_action_by_id(action_id)
        if action and action.action_type == ActionType.MCP:
            return {
                'name': action.name,
                'display_name': action.display_name,
                'slug': action.id,
                'config': action.config,
                'tags': action.tags,
                'description': action.description
            }
        return None
269 |
# Create singleton instance at import time; this triggers load_all() once,
# and every importer shares the same loaded catalog.
actions_loader = ActionsLoader()
--------------------------------------------------------------------------------
/app/templates/landing.html:
--------------------------------------------------------------------------------
1 | {% extends "base.html" %}
2 |
3 | {% block content %}
4 |
5 |
6 |
7 | Rules for
8 | coding agents
9 |
10 |
11 |

12 |

13 |
14 |
15 |
16 |
17 |
18 |
19 |
21 |
22 |
23 | 📂
24 |
25 |
Start from Repository
26 |
Import settings from an existing repository
27 |
64 |
65 |
66 |
67 |
68 |
69 |
70 |
71 | 📋
72 |
73 |
Use Template
74 | COMING SOON
75 |
76 |
Start with a pre-configured template
77 |
78 |
87 |
91 |
92 |
93 |
94 |
95 |
96 |
98 |
99 |
100 | ✨
101 |
102 |
Start Fresh
103 |
Build your configuration from scratch
104 |
105 |
106 |
107 |
108 |
109 |
110 |
226 | {% endblock %}
--------------------------------------------------------------------------------
/test_state_management.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | State Management Test
7 |
49 |
50 |
51 | 🧪 State Management Test Suite
52 |
53 |
54 |
Test Controls
55 |
56 |
57 |
58 |
59 |
60 |
61 |
Test Results
62 |
63 |
64 |
65 |
66 |
State Inspector
67 |
Click "Inspect Current State" to view
68 |
69 |
70 |
71 |
310 |
311 |
--------------------------------------------------------------------------------
/gitingest.md:
--------------------------------------------------------------------------------
1 | # GitIngest – **AI Agent Integration Guide**
2 |
3 | Turn any Git repository into a prompt-ready text digest. GitIngest fetches, cleans, and formats source code so AI agents and Large Language Models can reason over complete projects programmatically.
4 |
5 | **🤖 For AI Agents**: Use CLI or Python package for automated integration. Web UI is designed for human interaction only.
6 |
7 | ---
8 | ## 1. Installation
9 |
10 | ### 1.1 CLI Installation (Recommended for Scripts & Automation)
11 | ```bash
12 | # Best practice: Use pipx for CLI tools (isolated environment)
13 | pipx install gitingest
14 |
15 | # Alternative: Use pip (may conflict with other packages)
16 | pip install gitingest
17 |
18 | # Verify installation
19 | gitingest --help
20 | ```
21 |
22 | ### 1.2 Python Package Installation (For Code Integration)
23 | ```bash
24 | # For projects/notebooks: Use pip in virtual environment
25 | python -m venv gitingest-env
26 | source gitingest-env/bin/activate # On Windows: gitingest-env\Scripts\activate
27 | pip install gitingest
28 |
29 | # Or add to requirements.txt
30 | echo "gitingest" >> requirements.txt
31 | pip install -r requirements.txt
32 |
33 | # For self-hosting: Install with server dependencies
34 | pip install gitingest[server]
35 |
36 | # For development: Install with dev dependencies
37 | pip install gitingest[dev,server]
38 | ```
39 |
40 | ### 1.3 Installation Verification
41 | ```bash
42 | # Test CLI installation
43 | gitingest --version
44 |
45 | # Test Python package
46 | python -c "from gitingest import ingest; print('GitIngest installed successfully')"
47 |
48 | # Quick functionality test
49 | gitingest https://github.com/octocat/Hello-World -o test_output.txt
50 | ```
51 |
52 | ---
53 | ## 2. Quick-Start for AI Agents
54 | | Method | Best for | One-liner |
55 | |--------|----------|-----------|
56 | | **CLI** | Scripts, automation, pipelines | `gitingest https://github.com/user/repo -o - \| your-llm` |
57 | | **Python** | Code integration, notebooks, async tasks | `from gitingest import ingest; s,t,c = ingest('repo-url'); process(c)` |
58 | | **URL Hack** | Quick web scraping (limited) | Replace `github.com` → `gitingest.com` in any GitHub URL |
59 | | **Web UI** | **Human use only** | ~~Not recommended for AI agents~~ |
60 |
61 | ---
62 | ## 3. Output Format for AI Processing
63 | GitIngest returns **structured plain-text** optimized for LLM consumption with three distinct sections:
64 |
65 | ### 3.1 Repository Summary
66 | ```
67 | Repository: owner/repo-name
68 | Files analyzed: 42
69 | Estimated tokens: 15.2k
70 | ```
71 | Contains basic metadata: repository name, file count, and token estimation for LLM planning.
72 |
73 | ### 3.2 Directory Structure
74 | ```
75 | Directory structure:
76 | └── project-name/
77 | ├── src/
78 | │ ├── main.py
79 | │ └── utils.py
80 | ├── tests/
81 | │ └── test_main.py
82 | └── README.md
83 | ```
84 | Hierarchical tree view showing the complete project structure for context and navigation.
85 |
86 | ### 3.3 File Contents
87 | Each file is wrapped with clear delimiters:
88 | ```
89 | ================================================
90 | FILE: src/main.py
91 | ================================================
92 | def hello_world():
93 | print("Hello, World!")
94 |
95 | if __name__ == "__main__":
96 | hello_world()
97 |
98 |
99 | ================================================
100 | FILE: README.md
101 | ================================================
102 | # Project Title
103 |
104 | This is a sample project...
105 | ```
106 |
107 | ### 3.4 Usage Example
108 | ```python
109 | # Python package usage
110 | from gitingest import ingest
111 |
112 | summary, tree, content = ingest("https://github.com/octocat/Hello-World")
113 |
114 | # Returns exactly:
115 | # summary = "Repository: octocat/hello-world\nFiles analyzed: 1\nEstimated tokens: 29"
116 | # tree = "Directory structure:\n└── octocat-hello-world/\n └── README"
117 | # content = "================================================\nFILE: README\n================================================\nHello World!\n\n\n"
118 |
119 | # For AI processing, combine all sections:
120 | full_context = f"{summary}\n\n{tree}\n\n{content}"
121 | ```
122 |
123 | ```bash
124 | # CLI usage - pipe directly to your AI system
125 | gitingest https://github.com/octocat/Hello-World -o - | your_llm_processor
126 |
127 | # Output streams the complete formatted text:
128 | # Repository: octocat/hello-world
129 | # Files analyzed: 1
130 | # Estimated tokens: 29
131 | #
132 | # Directory structure:
133 | # └── octocat-hello-world/
134 | # └── README
135 | #
136 | # ================================================
137 | # FILE: README
138 | # ================================================
139 | # Hello World!
140 | ```
141 |
142 |
143 |
144 | ---
145 | ## 4. AI Agent Integration Methods
146 |
147 | ### 4.1 CLI Integration (Recommended for Automation)
148 | ```bash
149 | # Basic usage - pipe directly to your AI system
150 | gitingest https://github.com/user/repo -o - | your_ai_processor
151 |
152 | # Advanced filtering for focused analysis (long flags)
153 | gitingest https://github.com/user/repo \
154 | --include-pattern "*.py" --include-pattern "*.js" --include-pattern "*.md" \
155 | --max-size 102400 \
156 | -o - | python your_analyzer.py
157 |
158 | # Same command with short flags (more concise)
159 | gitingest https://github.com/user/repo \
160 | -i "*.py" -i "*.js" -i "*.md" \
161 | -s 102400 \
162 | -o - | python your_analyzer.py
163 |
164 | # Exclude unwanted files and directories (long flags)
165 | gitingest https://github.com/user/repo \
166 | --exclude-pattern "node_modules/*" --exclude-pattern "*.log" \
167 | --exclude-pattern "dist/*" \
168 | -o - | your_analyzer
169 |
170 | # Same with short flags
171 | gitingest https://github.com/user/repo \
172 | -e "node_modules/*" -e "*.log" -e "dist/*" \
173 | -o - | your_analyzer
174 |
175 | # Private repositories with token (short flag)
176 | export GITHUB_TOKEN="ghp_your_token_here"
177 | gitingest https://github.com/user/private-repo -t $GITHUB_TOKEN -o -
178 |
179 | # Specific branch analysis (short flag)
180 | gitingest https://github.com/user/repo -b main -o -
181 |
182 | # Save to file (default: digest.txt in current directory)
183 | gitingest https://github.com/user/repo -o my_analysis.txt
184 |
185 | # Ultra-concise example for small files only
186 | gitingest https://github.com/user/repo -i "*.py" -s 51200 -o -
187 | ```
188 |
189 | **Key Parameters for AI Agents**:
190 | - `-s` / `--max-size`: Maximum file size in bytes to process (default: no limit)
191 | - `-i` / `--include-pattern`: Include files matching Unix shell-style wildcards
192 | - `-e` / `--exclude-pattern`: Exclude files matching Unix shell-style wildcards
193 | - `-b` / `--branch`: Specify branch to analyze (defaults to repository's default branch)
194 | - `-t` / `--token`: GitHub personal access token for private repositories
195 | - `-o` / `--output`: Stream to STDOUT with `-` (default saves to `digest.txt`)
196 |
197 | ### 4.2 Python Package (Best for Code Integration)
198 | ```python
199 | from gitingest import ingest, ingest_async
200 | import asyncio
201 |
202 | # Synchronous processing
203 | def analyze_repository(repo_url: str):
204 | summary, tree, content = ingest(repo_url)
205 |
206 | # Process metadata
207 | repo_info = parse_summary(summary)
208 |
209 | # Analyze structure
210 | file_structure = parse_tree(tree)
211 |
212 | # Process code content
213 | return analyze_code(content)
214 |
215 | # Asynchronous processing (recommended for AI services)
216 | async def batch_analyze_repos(repo_urls: list):
217 | tasks = [ingest_async(url) for url in repo_urls]
218 | results = await asyncio.gather(*tasks)
219 | return [process_repo_data(*result) for result in results]
220 |
221 | # Memory-efficient processing for large repos
222 | def stream_process_repo(repo_url: str):
223 | summary, tree, content = ingest(
224 | repo_url,
225 | max_file_size=51200, # 50KB max per file
226 | include_patterns=["*.py", "*.js"], # Focus on code files
227 | )
228 |
229 | # Process in chunks to manage memory
230 | for file_content in split_content(content):
231 | yield analyze_file(file_content)
232 |
233 | # Filtering with exclude patterns
234 | def analyze_without_deps(repo_url: str):
235 | summary, tree, content = ingest(
236 | repo_url,
237 | exclude_patterns=[
238 | "node_modules/*", "*.lock", "dist/*",
239 | "build/*", "*.min.js", "*.log"
240 | ]
241 | )
242 | return analyze_code(content)
243 | ```
244 |
245 | **Python Integration Patterns**:
246 | - **Batch Processing**: Use `ingest_async` for multiple repositories
247 | - **Memory Management**: Use `max_file_size` and pattern filtering for large repos
248 | - **Error Handling**: Wrap in try-catch for network/auth issues
249 | - **Caching**: Store results to avoid repeated API calls
250 | - **Pattern Filtering**: Use `include_patterns` and `exclude_patterns` lists
251 |
252 | ### 4.3 Web UI (❌ Not for AI Agents)
253 | The web interface at `https://gitingest.com` is designed for **human interaction only**.
254 |
255 | **Why AI agents should avoid the web UI**:
256 | - Requires manual interaction and browser automation
257 | - No programmatic access to results
258 | - Rate limiting and CAPTCHA protection
259 | - Inefficient for automated workflows
260 |
261 | **Use CLI or Python package instead** for all AI agent integrations.
262 |
263 | ---
264 | ## 5. AI Agent Best Practices
265 |
266 | ### 5.1 Repository Analysis Workflows
267 | ```python
268 | # Pattern 1: Full repository analysis
269 | def full_repo_analysis(repo_url: str):
270 | summary, tree, content = ingest(repo_url)
271 | return {
272 | 'metadata': extract_metadata(summary),
273 | 'structure': analyze_structure(tree),
274 | 'code_analysis': analyze_all_files(content),
275 | 'insights': generate_insights(summary, tree, content)
276 | }
277 |
278 | # Pattern 2: Selective file processing
279 | def selective_analysis(repo_url: str, file_patterns: list):
280 | summary, tree, content = ingest(
281 | repo_url,
282 | include_patterns=file_patterns
283 | )
284 | return focused_analysis(content)
285 |
286 | # Pattern 3: Streaming for large repos
287 | def stream_analysis(repo_url: str):
288 | # First pass: get structure and metadata only
289 | summary, tree, _ = ingest(
290 | repo_url,
291 | include_patterns=["*.md", "*.txt"],
292 | max_file_size=10240 # 10KB limit for docs
293 | )
294 |
295 | # Then process code files selectively by language
296 | for pattern in ["*.py", "*.js", "*.go", "*.rs"]:
297 | _, _, content = ingest(
298 | repo_url,
299 | include_patterns=[pattern],
300 | max_file_size=51200 # 50KB limit for code
301 | )
302 | yield process_language_specific(content, pattern)
303 | ```
304 |
305 | ### 5.2 Error Handling for AI Agents
306 | ```python
307 | from gitingest import ingest
308 | from gitingest.utils.exceptions import GitIngestError
309 | import time
310 |
311 | def robust_ingest(repo_url: str, retries: int = 3):
312 | for attempt in range(retries):
313 | try:
314 | return ingest(repo_url)
315 | except GitIngestError as e:
316 | if attempt == retries - 1:
317 | return None, None, f"Failed to ingest: {e}"
318 | time.sleep(2 ** attempt) # Exponential backoff
319 | ```
320 |
321 | ### 5.3 Private Repository Access
322 | ```python
323 | import os
324 | from gitingest import ingest
325 |
326 | # Method 1: Environment variable
327 | def ingest_private_repo(repo_url: str):
328 | token = os.getenv('GITHUB_TOKEN')
329 | if not token:
330 | raise ValueError("GITHUB_TOKEN environment variable required")
331 | return ingest(repo_url, token=token)
332 |
333 | # Method 2: Secure token management
334 | def ingest_with_token_rotation(repo_url: str, token_manager):
335 | token = token_manager.get_active_token()
336 | try:
337 | return ingest(repo_url, token=token)
338 | except AuthenticationError:
339 | token = token_manager.rotate_token()
340 | return ingest(repo_url, token=token)
341 | ```
342 |
343 | ---
344 | ## 6. Integration Scenarios for AI Agents
345 |
346 | | Use Case | Recommended Method | Example Implementation |
347 | |----------|-------------------|----------------------|
348 | | **Code Review Bot** | Python async | `await ingest_async(pr_repo)` → analyze changes |
349 | | **Documentation Generator** | CLI with filtering | `gitingest repo -i "*.py" -i "*.md" -o -` |
350 | | **Vulnerability Scanner** | Python with error handling | Batch process multiple repos |
351 | | **Code Search Engine** | CLI → Vector DB | `gitingest repo -o - \| embed \| store` |
352 | | **AI Coding Assistant** | Python integration | Load repo context into conversation |
353 | | **CI/CD Analysis** | CLI integration | `gitingest repo -o - \| analyze_pipeline` |
354 | | **Repository Summarization** | Python with streaming | Process large repos in chunks |
355 | | **Dependency Analysis** | CLI exclude patterns | `gitingest repo -e "node_modules/*" -e "*.lock" -o -` |
356 | | **Security Audit** | CLI with size limits | `gitingest repo -i "*.py" -i "*.js" -s 204800 -o -` |
357 |
358 | ---
359 | ## 7. Support & Resources for AI Developers
360 | * **Web UI official instance**: https://gitingest.com
361 | * **GitHub Repository**: https://github.com/coderamp-labs/gitingest
362 | * **Python Package**: https://pypi.org/project/gitingest/
363 | * **Community Support**: https://discord.gg/zerRaGK9EC
364 |
365 | _GitIngest – Purpose-built for AI agents to understand entire codebases programmatically._
--------------------------------------------------------------------------------
/app/templates/docs.html:
--------------------------------------------------------------------------------
1 | {% extends "base.html" %}
2 |
3 | {% block title %}Gitrules - How it works{% endblock %}
4 |
5 | {% block content %}
6 |
7 |
8 |
9 |
10 |
11 | Supercharge Claude with
12 | CLAUDE.md files and more
13 |
14 |
15 | Create CLAUDE.md, .cursorrules, and agent files that tell Claude exactly how to work on your codebase.
16 |
17 |
18 | Drop them into your repo with a single command. Plain files your team owns.
19 |
20 |
21 |
22 |
23 | 📝 CLAUDE.md
24 | Project rules
25 |
26 |
27 | 🧠 .claude/agents/
28 | AI helpers
29 |
30 |
31 | 🔌 .mcp.json
32 | Claude plugins
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 | ◆ Why this matters for Claude users
45 |
46 |
47 | -
48 | ▸
49 | Claude & Claude Code automatically read CLAUDE.md files to understand your project's rules and conventions.
50 |
51 | -
52 | ▸
53 | Today your instructions to Claude live in scattered chats and one-off prompts. Hard to share. Easy to lose.
54 |
55 | -
56 | ▸
57 | Gitrules creates CLAUDE.md and other config files that make Claude work consistently for your whole team.
58 |
59 |
60 |
61 |
62 |
63 |
64 |
65 | ◆ How it works
66 |
67 |
68 |
69 |
70 |
71 | 1
72 |
73 |
74 |
Pick what you want
75 |
Choose from simple building blocks:
76 |
77 | - Guidelines - your "house rules" for coding and reviews
78 | - Helpers - ready-made roles, like a "researcher" or "reviewer"
79 | - Add-ons - optional extras—think "plugins" your AI can use
80 |
81 |
82 |
83 |
84 |
85 |
86 | 2
87 |
88 |
89 |
Tweak the text
90 |
Open and edit the wording directly on the page—like writing a checklist or a policy in plain English.
91 |
92 |
93 |
94 |
95 |
96 | 3
97 |
98 |
99 |
Install in your repo
100 |
Copy one command. It creates the same files in your repository.
101 |
No black box. You can read, version, and edit everything.
102 |
103 |
104 |
105 |
106 |
107 |
108 |
109 |
110 | ◆ Files Claude understands
111 |
112 |
113 |
114 |
✓
115 |
116 |
CLAUDE.md - Your project's house rules
117 |
Claude reads this automatically. Define coding standards, architecture patterns, and project-specific instructions.
118 |
119 |
120 |
121 |
✓
122 |
123 |
.claude/agents/*.md - Specialized AI helpers
124 |
Give Claude different "modes" like researcher, reviewer, or debugger.
125 |
126 |
127 |
128 |
✓
129 |
130 |
.mcp.json - Claude Code plugins (MCPs)
131 |
Add capabilities like database access, API integrations, and more.
132 |
133 |
134 |
135 |
136 |
🎯 Claude Code reads these automatically: Just drop them in your repo and Claude instantly follows your rules.
137 |
138 |
139 |
140 |
141 |
142 | Simple example flows
143 |
144 |
145 |
146 |
A. "Make our AI code reviews consistent"
147 |
148 | - 1. Add a Code Quality bundle.
149 | - 2. Skim the checklist; edit anything in plain English.
150 | - 3. Install.
151 |
152 |
153 | → Now everyone's AI uses the same review standards.
154 |
155 |
156 |
157 |
158 |
B. "Give our AI a researcher persona"
159 |
160 | - 1. Add a Researcher helper.
161 | - 2. Personalize the brief ("be concise," "cite sources," etc.).
162 | - 3. Install.
163 |
164 |
165 | → Your AI now has a reusable, team-wide "research mode."
166 |
167 |
168 |
169 |
170 |
171 |
172 |
173 |
174 | ◆ Safety & control
175 |
176 |
177 |
178 |
🛡️
179 |
You stay in charge
180 |
Nothing touches your repo until you run the command.
181 |
182 |
183 |
👁️
184 |
Inspect first
185 |
All content is visible before you install.
186 |
187 |
188 |
↩️
189 |
Easy to undo
190 |
It's just files—use Git like always.
191 |
192 |
193 |
194 |
195 |
196 |
197 |
198 | ◆ Who it's for
199 |
200 |
201 |
202 | Developers who want predictable AI help across projects.
203 |
204 |
205 | Team leads who want shared standards the AI actually follows.
206 |
207 |
208 | Newcomers who want results without learning prompt jargon.
209 |
210 |
211 |
212 |
213 |
214 |
215 | FAQ
216 |
217 |
218 |
219 |
Do I need to know anything about "AI agents" or "MCPs"?
220 |
No. You'll pick items with friendly names and edit normal text. The site handles the wiring.
221 |
222 |
223 |
224 |
What if our needs change?
225 |
Edit the files in your repo. Reinstall, or just commit changes like any document.
226 |
227 |
228 |
229 |
Will this lock us in?
230 |
No. The output is regular text files you own.
231 |
232 |
233 |
234 |
235 |
236 |
237 |
238 | {% endblock %}
239 |
240 | {% block styles %}
241 |
268 | {% endblock %}
--------------------------------------------------------------------------------
/app/templates/components/final_step_modal.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
Generate Your Configuration
8 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
📋 Summary of Selected Tools
19 |
20 |
21 |
22 |
23 |
24 |
25 |
42 |
43 |
44 |
45 |
👁️ File Preview
46 |
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
🔧 Patch Preview
57 |
58 |
The following patch file will be generated for easy application to your repository:
59 |
patch -p0 < gitrules-config.patch
60 |
61 |
62 |
63 |
64 |
65 |
66 |
67 |
68 |
69 |
70 |
73 |
76 |
79 |
80 |
81 |
82 |
83 |
84 |
--------------------------------------------------------------------------------
/app/static/js/workspace_manager.js:
--------------------------------------------------------------------------------
/**
 * WorkspaceState - Manages all state for a single workspace context.
 *
 * Tracks the context's files (path -> content), the currently selected
 * file, expanded folders, action/tool toggle states, agent-name -> file
 * mappings, and a bounded undo/redo history of state snapshots.
 */
class WorkspaceState {
    /**
     * @param {string} contextId - Identifier of the owning context.
     */
    constructor(contextId) {
        this.contextId = contextId;
        this.files = {}; // File path -> content mapping
        this.selectedFile = null; // Path of the file open in the editor, or null
        this.expandedFolders = new Set(); // Folder paths expanded in the file tree

        // Action/Tool states - unified state for toggles. Each category
        // tracks UI expansion, an active flag, and per-item checked state.
        this.actionStates = {
            mcps: {
                expanded: false,
                active: false,
                items: {} // name -> checked state
            },
            agents: {
                expanded: false,
                active: false,
                items: {} // name -> checked state
            },
            rules: {
                expanded: false,
                active: false,
                items: {} // name -> checked state
            }
        };

        // Agent name to file path mappings for easier removal
        this.agentMappings = {};

        // Undo/redo stacks of snapshots; `present` mirrors the live state.
        // `past` is capped at `maxSize` entries (oldest dropped first).
        this.history = {
            past: [],
            present: null,
            future: [],
            maxSize: 50
        };
    }

    /**
     * Reset to a pristine empty state and clear all history.
     */
    initEmpty() {
        this.files = {};
        this.selectedFile = null;
        this.expandedFolders = new Set();
        this.actionStates = {
            mcps: { expanded: false, active: false, items: {} },
            agents: { expanded: false, active: false, items: {} },
            rules: { expanded: false, active: false, items: {} }
        };
        this.agentMappings = {};
        this.history.present = this.snapshot();
        this.history.past = [];
        this.history.future = [];
    }

    /**
     * Create a deep-enough snapshot of the current state for history.
     * `files` and `agentMappings` are shallow-copied (values are strings);
     * `actionStates` is deep-cloned via JSON since it nests objects.
     * @returns {Object} Plain-object snapshot with an ISO timestamp.
     */
    snapshot() {
        return {
            files: { ...this.files },
            selectedFile: this.selectedFile,
            expandedFolders: Array.from(this.expandedFolders),
            actionStates: JSON.parse(JSON.stringify(this.actionStates)),
            agentMappings: { ...this.agentMappings },
            timestamp: new Date().toISOString()
        };
    }

    /**
     * Add (or overwrite) a file and select it (Include action).
     * Records the previous state in history and notifies listeners.
     * @param {string} path - Workspace-relative file path.
     * @param {string} content - File content.
     */
    addFile(path, content) {
        this.pushHistory();
        this.files[path] = content;
        this.selectedFile = path;
        this.history.present = this.snapshot();
        // Dispatch event for auto-share
        window.dispatchEvent(new CustomEvent('workspace-file-added'));
    }

    /**
     * Delete a file, recording the change in history.
     * Fix: no-op when the path is not tracked, so the undo history is not
     * polluted with identical snapshots and no spurious event is fired.
     * @param {string} path - Workspace-relative file path.
     */
    deleteFile(path) {
        if (!(path in this.files)) return;
        this.pushHistory();
        delete this.files[path];
        if (this.selectedFile === path) {
            this.selectedFile = null;
        }
        this.history.present = this.snapshot();
        // Dispatch event for auto-share
        window.dispatchEvent(new CustomEvent('workspace-file-deleted'));
    }

    /**
     * Push the current `present` snapshot onto the past stack and clear
     * the redo (future) stack. Skipped when no present snapshot exists yet.
     */
    pushHistory() {
        if (this.history.present) {
            this.history.past.push(this.history.present);
            if (this.history.past.length > this.history.maxSize) {
                this.history.past.shift(); // drop the oldest entry
            }
            this.history.future = [];
        }
    }

    /**
     * Go to the previous state.
     * @returns {boolean} true if an undo was applied.
     */
    undo() {
        if (this.history.past.length === 0) return false;

        const previousState = this.history.past.pop();
        this.history.future.unshift(this.history.present);
        this.history.present = previousState;
        this.restoreFromSnapshot(previousState);
        return true;
    }

    /**
     * Go to the next state.
     * @returns {boolean} true if a redo was applied.
     */
    redo() {
        if (this.history.future.length === 0) return false;

        const nextState = this.history.future.shift();
        this.history.past.push(this.history.present);
        this.history.present = nextState;
        this.restoreFromSnapshot(nextState);
        return true;
    }

    /**
     * Overwrite live state from a snapshot, tolerating snapshots from
     * older versions that lack `actionStates` / `agentMappings`.
     * @param {Object} snapshot - Snapshot produced by snapshot().
     */
    restoreFromSnapshot(snapshot) {
        this.files = { ...snapshot.files };
        this.selectedFile = snapshot.selectedFile;
        this.expandedFolders = new Set(snapshot.expandedFolders || []);
        this.actionStates = snapshot.actionStates ? JSON.parse(JSON.stringify(snapshot.actionStates)) : {
            mcps: { expanded: false, active: false, items: {} },
            agents: { expanded: false, active: false, items: {} },
            rules: { expanded: false, active: false, items: {} }
        };
        this.agentMappings = snapshot.agentMappings ? { ...snapshot.agentMappings } : {};
    }

    /**
     * Reset to an empty state as an undoable action (unlike initEmpty,
     * which also wipes history).
     */
    reset() {
        this.pushHistory();
        this.files = {};
        this.selectedFile = null;
        this.expandedFolders = new Set();
        this.actionStates = {
            mcps: { expanded: false, active: false, items: {} },
            agents: { expanded: false, active: false, items: {} },
            rules: { expanded: false, active: false, items: {} }
        };
        this.agentMappings = {};
        this.history.present = this.snapshot();
    }

    /**
     * Toggle an action category (mcps, agents, rules).
     * Expansion follows the active flag. Unknown categories are ignored.
     * @param {string} category
     */
    toggleActionCategory(category) {
        if (this.actionStates[category]) {
            this.actionStates[category].active = !this.actionStates[category].active;
            this.actionStates[category].expanded = this.actionStates[category].active;
        }
    }

    /**
     * Set an action category's active/expanded flags explicitly.
     * @param {string} category
     * @param {boolean} active
     * @param {boolean} expanded
     */
    setActionCategoryState(category, active, expanded) {
        if (this.actionStates[category]) {
            this.actionStates[category].active = active;
            this.actionStates[category].expanded = expanded;
        }
    }

    /**
     * Toggle an individual action item's checked state.
     * `!undefined` is true, so the first toggle checks an unseen item
     * (the original dead-initialization branch was redundant).
     * @param {string} category
     * @param {string} itemName
     */
    toggleActionItem(category, itemName) {
        const state = this.actionStates[category];
        if (state) {
            state.items[itemName] = !state.items[itemName];
        }
    }

    /**
     * Set an individual action item's checked state explicitly.
     * @param {string} category
     * @param {string} itemName
     * @param {boolean} checked
     */
    setActionItemState(category, itemName, checked) {
        if (this.actionStates[category]) {
            this.actionStates[category].items[itemName] = checked;
        }
    }

    /**
     * @param {string} category
     * @returns {boolean} true if the category is active (false for unknown).
     */
    isActionCategoryActive(category) {
        return this.actionStates[category]?.active || false;
    }

    /**
     * @param {string} category
     * @returns {boolean} true if the category is expanded (false for unknown).
     */
    isActionCategoryExpanded(category) {
        return this.actionStates[category]?.expanded || false;
    }

    /**
     * @param {string} category
     * @param {string} itemName
     * @returns {boolean} true if the item is checked (false for unknown).
     */
    isActionItemChecked(category, itemName) {
        return this.actionStates[category]?.items[itemName] || false;
    }

    /** @returns {boolean} true if undo() would apply a state. */
    canUndo() {
        return this.history.past.length > 0;
    }

    /** @returns {boolean} true if redo() would apply a state. */
    canRedo() {
        return this.history.future.length > 0;
    }

    /**
     * Serialize the state (including history stacks) for localStorage.
     * @returns {string} JSON string.
     */
    serialize() {
        return JSON.stringify({
            contextId: this.contextId,
            files: this.files,
            selectedFile: this.selectedFile,
            expandedFolders: Array.from(this.expandedFolders),
            actionStates: this.actionStates,
            agentMappings: this.agentMappings,
            history: {
                past: this.history.past,
                present: this.history.present,
                future: this.history.future
            }
        });
    }

    /**
     * Rebuild a WorkspaceState from serialized data. Falls back to an
     * empty state when data is missing or unparseable; tolerates older
     * payloads without actionStates/agentMappings/history.
     * @param {string} contextId
     * @param {string|Object|null} data - JSON string or pre-parsed object.
     * @returns {WorkspaceState}
     */
    static deserialize(contextId, data) {
        const state = new WorkspaceState(contextId);
        if (data) {
            try {
                const parsed = typeof data === 'string' ? JSON.parse(data) : data;
                state.files = parsed.files || {};
                state.selectedFile = parsed.selectedFile || null;
                state.expandedFolders = new Set(parsed.expandedFolders || []);

                // Restore action states
                if (parsed.actionStates) {
                    state.actionStates = parsed.actionStates;
                }

                // Restore agent mappings
                if (parsed.agentMappings) {
                    state.agentMappings = parsed.agentMappings;
                }

                if (parsed.history) {
                    state.history.past = parsed.history.past || [];
                    state.history.present = parsed.history.present || state.snapshot();
                    state.history.future = parsed.history.future || [];
                } else {
                    state.history.present = state.snapshot();
                }
            } catch (e) {
                console.error('Failed to deserialize state:', e);
                state.initEmpty();
            }
        } else {
            state.initEmpty();
        }
        return state;
    }
}
263 |
/**
 * WorkspaceManager - Manages multiple workspace contexts.
 *
 * Owns the registry of contexts (persisted under `app:contexts`), the
 * per-context WorkspaceState persistence (`app:workspace:<id>`), and the
 * UI rendering hooks exposed on `window` by the other workspace scripts.
 */
class WorkspaceManager {
    constructor() {
        this.contexts = {}; // id -> { id, name, createdAt }
        this.currentContextId = null;
        this.currentState = null; // active WorkspaceState, or null before init()
    }

    /**
     * Initialize the manager: load the persisted context list, ensure a
     * default context exists, and switch to the last used context.
     */
    init() {
        this.contexts = this.loadContextsList();

        if (!this.contexts['default']) {
            this.createContext('default', 'Default Workspace');
        }

        const lastContext = localStorage.getItem('app:currentContext');

        if (lastContext && this.contexts[lastContext]) {
            this.switchContext(lastContext);
        } else {
            this.switchContext('default');
        }
    }

    /**
     * Load the contexts registry from localStorage.
     * @returns {Object} id -> context metadata; {} on missing/corrupt data.
     */
    loadContextsList() {
        try {
            const data = localStorage.getItem('app:contexts');
            return data ? JSON.parse(data) : {};
        } catch {
            return {};
        }
    }

    /** Persist the contexts registry to localStorage. */
    saveContextsList() {
        localStorage.setItem('app:contexts', JSON.stringify(this.contexts));
    }

    /**
     * Create a new context entry (does not switch to it).
     * @param {string} id - Unique context id.
     * @param {string} name - Display name.
     * @returns {boolean} false if the id already exists.
     */
    createContext(id, name) {
        if (this.contexts[id]) {
            console.warn(`Context ${id} already exists`);
            return false;
        }

        this.contexts[id] = {
            id,
            name,
            createdAt: Date.now()
        };

        this.saveContextsList();
        return true;
    }

    /**
     * Switch to a different context, saving the current state first.
     * Unknown ids are re-checked against localStorage (another tab may
     * have created them) before failing.
     * @param {string} contextId
     * @returns {boolean} true on success.
     */
    switchContext(contextId) {
        if (!this.contexts[contextId] && contextId !== 'default') {
            const saved = this.loadContextsList();
            if (!saved[contextId]) {
                console.error(`Context ${contextId} not found`);
                return false;
            }
            this.contexts = saved;
        }

        if (this.currentState) {
            this.saveState(this.currentContextId);
        }

        this.currentContextId = contextId;
        this.currentState = this.loadState(contextId);
        localStorage.setItem('app:currentContext', contextId);
        this.render();

        return true;
    }

    /**
     * Delete a context and its persisted workspace state. The default
     * context cannot be deleted; deleting the active context switches
     * back to default.
     * @param {string} contextId
     * @returns {boolean} true on success.
     */
    deleteContext(contextId) {
        if (contextId === 'default') {
            console.warn('Cannot delete default context');
            return false;
        }

        delete this.contexts[contextId];
        localStorage.removeItem(`app:workspace:${contextId}`);
        this.saveContextsList();

        if (this.currentContextId === contextId) {
            this.switchContext('default');
        }

        return true;
    }

    /**
     * Persist the current state under the given context id.
     * @param {string} contextId
     */
    saveState(contextId) {
        if (!this.currentState) return;

        const key = `app:workspace:${contextId}`;
        localStorage.setItem(key, this.currentState.serialize());
    }

    /**
     * Load (or create empty) state for a context from localStorage.
     * @param {string} contextId
     * @returns {WorkspaceState}
     */
    loadState(contextId) {
        const key = `app:workspace:${contextId}`;
        const data = localStorage.getItem(key);
        return WorkspaceState.deserialize(contextId, data);
    }

    /** @returns {WorkspaceState|null} the active state. */
    getState() {
        return this.currentState;
    }

    /**
     * Render the UI based on current state: file tree, editor selection,
     * action toggles, history buttons, and the context dropdown.
     * Delegates to hooks other scripts install on `window`.
     */
    render() {
        if (!this.currentState) return;

        // Update file tree
        if (window.renderFileTree) {
            window.renderFileTree();
        }

        // Update editor
        if (this.currentState.selectedFile && this.currentState.files[this.currentState.selectedFile]) {
            // Selected file exists - load it
            if (window.openFile) {
                window.openFile(this.currentState.selectedFile);
            }
        } else {
            const fileKeys = Object.keys(this.currentState.files);
            if (fileKeys.length > 0) {
                // Fix: auto-select the first file even when selectedFile is
                // stale (points at a file that no longer exists, e.g. from
                // old persisted data). Previously the stale selection
                // blocked auto-select and left an empty editor.
                const firstFile = fileKeys[0];
                this.currentState.selectedFile = firstFile;
                this.saveState(this.currentContextId);

                if (window.openFile) {
                    window.openFile(firstFile);
                }
            } else {
                // No files available - clear any stale selection and the editor
                this.currentState.selectedFile = null;
                if (window.workspaceMonacoEditor) {
                    window.workspaceMonacoEditor.setValue('');
                }
                if (window.updateFilePathLabel) {
                    window.updateFilePathLabel(null);
                }
            }
        }

        // Restore action states UI
        if (window.restoreActionStates) {
            window.restoreActionStates();
        }

        this.updateHistoryButtons();
        this.updateContextDropdown();
    }

    /**
     * Enable/disable the undo/redo buttons to match history availability.
     * Guards against being called before a state is loaded.
     */
    updateHistoryButtons() {
        if (!this.currentState) return;

        const prevBtn = document.getElementById('files-prev-btn');
        const nextBtn = document.getElementById('files-next-btn');

        if (prevBtn) {
            prevBtn.disabled = !this.currentState.canUndo();
        }
        if (nextBtn) {
            nextBtn.disabled = !this.currentState.canRedo();
        }
    }

    /**
     * Rebuild the context <select> dropdown, truncating long names for
     * display (full name shown via title tooltip).
     */
    updateContextDropdown() {
        const contextSwitcher = document.getElementById('context-switcher');
        if (!contextSwitcher) return;

        contextSwitcher.innerHTML = '';
        Object.values(this.contexts).forEach(ctx => {
            const option = document.createElement('option');
            option.value = ctx.id;
            // Truncate long names to fit the dropdown
            let displayName = ctx.name;
            if (displayName.length > 25) {
                displayName = displayName.substring(0, 22) + '...';
            }
            option.textContent = displayName;
            option.title = ctx.name; // Show full name on hover
            option.selected = ctx.id === this.currentContextId;
            contextSwitcher.appendChild(option);
        });
    }

    /**
     * Include a file (main action for adding files), then persist and
     * re-render.
     * @param {string} path
     * @param {string} content
     * @returns {boolean} true on success.
     */
    includeFile(path, content) {
        if (!this.currentState) return false;

        this.currentState.addFile(path, content);
        this.saveState(this.currentContextId);
        this.render();
        return true;
    }

    /**
     * Delete a file, then persist and re-render.
     * @param {string} path
     * @returns {boolean} true on success.
     */
    deleteFile(path) {
        if (!this.currentState) return false;

        this.currentState.deleteFile(path);
        this.saveState(this.currentContextId);
        this.render();
        return true;
    }

    /** Undo the last action in the current workspace, if any. */
    undo() {
        if (!this.currentState) return;

        if (this.currentState.undo()) {
            this.saveState(this.currentContextId);
            this.render();
        }
    }

    /** Redo the last undone action in the current workspace, if any. */
    redo() {
        if (!this.currentState) return;

        if (this.currentState.redo()) {
            this.saveState(this.currentContextId);
            this.render();
        }
    }

    /** Reset the current workspace to empty (no confirmation prompt). */
    reset() {
        if (!this.currentState) return;

        this.currentState.reset();
        this.saveState(this.currentContextId);
        this.render();
    }
}
515 |
516 | // Export for global use
517 | window.WorkspaceState = WorkspaceState;
518 | window.WorkspaceManager = WorkspaceManager;
--------------------------------------------------------------------------------