├── src ├── generators │ ├── plans │ │ ├── index.ts │ │ ├── templates │ │ │ ├── types.ts │ │ │ ├── indexTemplate.ts │ │ │ └── planTemplate.ts │ │ ├── planGenerator.test.ts │ │ └── planGenerator.ts │ ├── documentation │ │ ├── index.ts │ │ ├── templates │ │ │ ├── types.ts │ │ │ ├── index.ts │ │ │ ├── securityTemplate.ts │ │ │ ├── toolingTemplate.ts │ │ │ ├── developmentWorkflowTemplate.ts │ │ │ ├── testingTemplate.ts │ │ │ ├── dataFlowTemplate.ts │ │ │ ├── indexTemplate.ts │ │ │ ├── glossaryTemplate.ts │ │ │ ├── common.ts │ │ │ ├── projectOverviewTemplate.ts │ │ │ ├── architectureTemplate.ts │ │ │ ├── troubleshootingTemplate.ts │ │ │ ├── migrationTemplate.ts │ │ │ └── apiReferenceTemplate.ts │ │ ├── guideRegistry.ts │ │ ├── documentationGenerator.test.ts │ │ └── documentationGenerator.ts │ ├── shared │ │ ├── index.ts │ │ ├── contextGenerator.ts │ │ ├── directoryTemplateHelpers.ts │ │ └── generatorUtils.ts │ └── agents │ │ ├── templates │ │ ├── index.ts │ │ ├── types.ts │ │ ├── indexTemplate.ts │ │ └── playbookTemplate.ts │ │ ├── index.ts │ │ ├── agentTypes.ts │ │ ├── agentGenerator.ts │ │ ├── agentGenerator.test.ts │ │ └── agentConfig.ts ├── services │ ├── llmClientFactory.ts │ ├── openRouterClient.ts │ ├── shared │ │ └── llmConfig.ts │ ├── baseLLMClient.ts │ ├── init │ │ └── initService.ts │ └── fill │ │ └── fillService.ts ├── types.ts ├── utils │ ├── versionChecker.ts │ ├── versionChecker.test.ts │ ├── promptLoader.ts │ ├── promptLoader.test.ts │ ├── fileMapper.ts │ └── cliUI.ts ├── runInit.integration.test.ts ├── cli.test.ts └── prompts │ └── defaults.ts ├── .env.example ├── .npmignore ├── .gitignore ├── jest.config.js ├── tsconfig.json ├── .github └── workflows │ ├── release.yml │ └── ci.yml ├── LICENSE ├── example-documentation.ts ├── CONTRIBUTING.md ├── package.json ├── prompts ├── update_plan_prompt.md └── update_scaffold_prompt.md ├── AGENTS.md └── README.md /src/generators/plans/index.ts: 
-------------------------------------------------------------------------------- 1 | export { PlanGenerator } from './planGenerator'; 2 | -------------------------------------------------------------------------------- /src/generators/documentation/index.ts: -------------------------------------------------------------------------------- 1 | export { DocumentationGenerator } from './documentationGenerator'; 2 | -------------------------------------------------------------------------------- /src/generators/shared/index.ts: -------------------------------------------------------------------------------- 1 | export { GeneratorUtils } from './generatorUtils'; 2 | export { ContextGenerator } from './contextGenerator'; 3 | export { formatDirectoryList } from './directoryTemplateHelpers'; 4 | -------------------------------------------------------------------------------- /src/generators/agents/templates/index.ts: -------------------------------------------------------------------------------- 1 | export { renderAgentPlaybook } from './playbookTemplate'; 2 | export { renderAgentIndex } from './indexTemplate'; 3 | export type { AgentTemplateContext, DocTouchpoint } from './types'; 4 | -------------------------------------------------------------------------------- /src/generators/agents/index.ts: -------------------------------------------------------------------------------- 1 | export { AgentGenerator } from './agentGenerator'; 2 | export { AGENT_TYPES, AgentType, IMPORTANT_FILES } from './agentTypes'; 3 | export { AGENT_RESPONSIBILITIES, AGENT_BEST_PRACTICES } from './agentConfig'; 4 | -------------------------------------------------------------------------------- /.env.example: -------------------------------------------------------------------------------- 1 | # OpenRouter API Configuration 2 | OPENROUTER_API_KEY=your_openrouter_api_key_here 3 | 4 | # Optional: Override default model 5 | # OPENROUTER_MODEL=x-ai/grok-4-fast 6 | 7 | # Optional: Custom base URL (rarely 
needed) 8 | # OPENROUTER_BASE_URL=https://openrouter.ai/api/v1 9 | -------------------------------------------------------------------------------- /src/generators/shared/contextGenerator.ts: -------------------------------------------------------------------------------- 1 | import { FileMapper } from '../../utils/fileMapper'; 2 | 3 | export class ContextGenerator { 4 | constructor(protected readonly fileMapper: FileMapper) {} 5 | 6 | protected async loadFileContent(path: string): Promise<string> { 7 | return this.fileMapper.readFileContent(path); 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /src/generators/shared/directoryTemplateHelpers.ts: -------------------------------------------------------------------------------- 1 | export function formatDirectoryList(directories: string[], placeholderMessage?: string): string { 2 | if (!directories.length) { 3 | return placeholderMessage ?? ''; 4 | } 5 | 6 | return directories 7 | .map(dir => `- \`${dir}/\` — TODO: Describe the purpose of this directory.`) 8 | .join('\n'); 9 | } 10 | -------------------------------------------------------------------------------- /src/generators/agents/templates/types.ts: -------------------------------------------------------------------------------- 1 | import { AgentType } from '../agentTypes'; 2 | 3 | export interface DocTouchpoint { 4 | title: string; 5 | path: string; 6 | marker: string; 7 | } 8 | 9 | export interface AgentTemplateContext { 10 | agentType: AgentType; 11 | topLevelDirectories: string[]; 12 | docTouchpoints: DocTouchpoint[]; 13 | responsibilities: string[]; 14 | bestPractices: string[]; 15 | } 16 | -------------------------------------------------------------------------------- /src/generators/plans/templates/types.ts: -------------------------------------------------------------------------------- 1 | import { AgentType } from '../../agents/agentTypes'; 2 | import { GuideMeta } from '../../documentation/templates/types';
3 | 4 | export interface PlanAgentSummary { 5 | type: AgentType; 6 | title: string; 7 | responsibility: string; 8 | } 9 | 10 | export interface PlanTemplateContext { 11 | title: string; 12 | slug: string; 13 | summary?: string; 14 | agents: PlanAgentSummary[]; 15 | docs: GuideMeta[]; 16 | } 17 | 18 | export interface PlanIndexEntry { 19 | slug: string; 20 | title: string; 21 | } 22 | -------------------------------------------------------------------------------- /.npmignore: -------------------------------------------------------------------------------- 1 | # Source files 2 | src/ 3 | *.ts 4 | 5 | # Configuration files 6 | tsconfig.json 7 | .env 8 | .env.* 9 | !.env.example 10 | 11 | # Development files 12 | node_modules/ 13 | *.log 14 | .DS_Store 15 | 16 | # CI/CD 17 | .github/ 18 | .gitignore 19 | 20 | # Test files 21 | **/*.test.* 22 | **/*.spec.* 23 | coverage/ 24 | jest.config.* 25 | 26 | # Build files 27 | *.tsbuildinfo 28 | 29 | # IDE 30 | .vscode/ 31 | .idea/ 32 | *.swp 33 | *.swo 34 | 35 | # Documentation source 36 | docs/ 37 | *.md 38 | !README.md 39 | 40 | # Examples 41 | examples/ 42 | ai-context-output/ -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Dependencies 2 | node_modules/ 3 | 4 | # Build output 5 | dist/ 6 | *.tsbuildinfo 7 | 8 | # Environment files 9 | .env 10 | .env.* 11 | !.env.example 12 | 13 | # Logs 14 | *.log 15 | npm-debug.log* 16 | yarn-debug.log* 17 | yarn-error.log* 18 | 19 | # OS files 20 | .DS_Store 21 | Thumbs.db 22 | 23 | # IDE 24 | .vscode/ 25 | .idea/ 26 | *.swp 27 | *.swo 28 | 29 | # Test coverage 30 | coverage/ 31 | .nyc_output/ 32 | 33 | # Output directories 34 | output/ 35 | 36 | # AI Context state files 37 | context-log.json 38 | 39 | # Temporary files 40 | *.tmp 41 | *.temp 42 | .cache/ -------------------------------------------------------------------------------- 
/src/generators/documentation/templates/types.ts: -------------------------------------------------------------------------------- 1 | import { RepoStructure } from '../../../types'; 2 | 3 | export interface GuideMeta { 4 | key: string; 5 | title: string; 6 | file: string; 7 | marker: string; 8 | primaryInputs: string; 9 | } 10 | 11 | export interface DirectoryStat { 12 | name: string; 13 | fileCount: number; 14 | } 15 | 16 | export interface DocumentationTemplateContext { 17 | repoStructure: RepoStructure; 18 | topLevelDirectories: string[]; 19 | primaryLanguages: Array<{ extension: string; count: number }>; 20 | directoryStats: DirectoryStat[]; 21 | guides: GuideMeta[]; 22 | } 23 | -------------------------------------------------------------------------------- /jest.config.js: -------------------------------------------------------------------------------- 1 | /** @type {import('jest').Config} */ 2 | module.exports = { 3 | preset: 'ts-jest', 4 | testEnvironment: 'node', 5 | roots: ['<rootDir>/src'], 6 | testMatch: ['**/__tests__/**/*.ts', '**/?(*.)+(spec|test).ts'], 7 | transform: { 8 | '^.+\\.ts$': 'ts-jest', 9 | }, 10 | collectCoverageFrom: [ 11 | 'src/**/*.ts', 12 | '!src/**/*.d.ts', 13 | '!src/**/index.ts', 14 | ], 15 | coverageDirectory: 'coverage', 16 | coverageReporters: ['text', 'lcov', 'html'], 17 | moduleFileExtensions: ['ts', 'js', 'json'], 18 | testPathIgnorePatterns: ['/node_modules/', '/dist/'], 19 | verbose: true, 20 | }; -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "ES2020", 4 | "module": "commonjs", 5 | "outDir": "./dist", 6 | "rootDir": "./src", 7 | "strict": true, 8 | "esModuleInterop": true, 9 | "skipLibCheck": true, 10 | "forceConsistentCasingInFileNames": true, 11 | "resolveJsonModule": true, 12 | "declaration": true, 13 | "declarationMap": true, 14 | "sourceMap": true, 15 |
"baseUrl": "./src", 16 | "paths": { 17 | "@generators/agents/*": ["generators/agents/*"] 18 | } 19 | }, 20 | "include": ["src/**/*"], 21 | "exclude": ["node_modules", "dist", "**/*.test.ts"] 22 | } -------------------------------------------------------------------------------- /src/generators/agents/agentTypes.ts: -------------------------------------------------------------------------------- 1 | export const AGENT_TYPES = [ 2 | 'code-reviewer', 3 | 'bug-fixer', 4 | 'feature-developer', 5 | 'refactoring-specialist', 6 | 'test-writer', 7 | 'documentation-writer', 8 | 'performance-optimizer', 9 | 'security-auditor', 10 | 'backend-specialist', 11 | 'frontend-specialist', 12 | 'architect-specialist', 13 | 'devops-specialist', 14 | 'database-specialist', 15 | 'mobile-specialist' 16 | ] as const; 17 | 18 | export type AgentType = typeof AGENT_TYPES[number]; 19 | 20 | export const IMPORTANT_FILES = [ 21 | 'package.json', 'tsconfig.json', 'webpack.config.js', 22 | 'next.config.js', 'tailwind.config.js', 'README.md', 23 | '.gitignore', 'Dockerfile', 'docker-compose.yml' 24 | ]; -------------------------------------------------------------------------------- /src/services/llmClientFactory.ts: -------------------------------------------------------------------------------- 1 | import { LLMConfig, OpenRouterConfig } from '../types'; 2 | import { BaseLLMClient } from './baseLLMClient'; 3 | import { OpenRouterClient } from './openRouterClient'; 4 | 5 | export class LLMClientFactory { 6 | static createClient(config: LLMConfig): BaseLLMClient { 7 | // Convert LLMConfig to OpenRouterConfig for backward compatibility 8 | const openRouterConfig: OpenRouterConfig = { 9 | apiKey: config.apiKey, 10 | baseUrl: config.baseUrl || 'https://openrouter.ai/api/v1', 11 | model: config.model 12 | }; 13 | return new OpenRouterClient(openRouterConfig); 14 | } 15 | 16 | static getDefaultModel(): string { 17 | return 'x-ai/grok-4-fast'; 18 | } 19 | 20 | static getEnvironmentVariables(): string[] { 
21 | return ['OPENROUTER_API_KEY']; 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /src/generators/documentation/templates/index.ts: -------------------------------------------------------------------------------- 1 | export { renderIndex } from './indexTemplate'; 2 | export { renderProjectOverview } from './projectOverviewTemplate'; 3 | export { renderArchitectureNotes } from './architectureTemplate'; 4 | export { renderDevelopmentWorkflow } from './developmentWorkflowTemplate'; 5 | export { renderTestingStrategy } from './testingTemplate'; 6 | export { renderGlossary } from './glossaryTemplate'; 7 | export { renderDataFlow } from './dataFlowTemplate'; 8 | export { renderSecurity } from './securityTemplate'; 9 | export { renderToolingGuide } from './toolingTemplate'; 10 | export { renderApiReference } from './apiReferenceTemplate'; 11 | export { renderTroubleshooting } from './troubleshootingTemplate'; 12 | export { renderMigration } from './migrationTemplate'; 13 | export { renderOnboarding } from './onboardingTemplate'; 14 | export type { DocumentationTemplateContext, GuideMeta, DirectoryStat } from './types'; 15 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release 2 | 3 | on: 4 | push: 5 | tags: 6 | - 'v*' 7 | workflow_dispatch: 8 | inputs: 9 | version: 10 | description: 'Version type (patch, minor, major)' 11 | required: true 12 | default: 'patch' 13 | type: choice 14 | options: 15 | - patch 16 | - minor 17 | - major 18 | 19 | jobs: 20 | publish-dry-run: 21 | runs-on: ubuntu-latest 22 | steps: 23 | - uses: actions/checkout@v4 24 | 25 | - name: Use Node.js 26 | uses: actions/setup-node@v4 27 | with: 28 | node-version: '20.x' 29 | registry-url: 'https://registry.npmjs.org' 30 | cache: 'npm' 31 | 32 | - name: Install dependencies 33 | run: npm ci 34 | 
35 | - name: Build 36 | run: npm run build 37 | 38 | - name: Dry run publish 39 | run: npm publish --dry-run 40 | env: 41 | NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 AI Coders 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /example-documentation.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Example script demonstrating how to scaffold documentation and agent playbooks 3 | * programmatically without going through the CLI binary. 
4 | */ 5 | 6 | import path from 'node:path'; 7 | import { DocumentationGenerator } from './src/generators/documentation'; 8 | import { AgentGenerator } from './src/generators/agents'; 9 | import { FileMapper } from './src/utils/fileMapper'; 10 | 11 | async function scaffoldRepo(repoRoot: string, outputDir: string = path.join(repoRoot, '.context')) { 12 | const fileMapper = new FileMapper(); 13 | const documentationGenerator = new DocumentationGenerator(); 14 | const agentGenerator = new AgentGenerator(); 15 | 16 | const repoStructure = await fileMapper.mapRepository(repoRoot); 17 | 18 | await documentationGenerator.generateDocumentation(repoStructure, outputDir, {}, true); 19 | await agentGenerator.generateAgentPrompts(repoStructure, outputDir, true); 20 | 21 | console.log(`Scaffold written to ${outputDir}`); 22 | } 23 | 24 | if (require.main === module) { 25 | const repoRoot = process.argv[2] ? path.resolve(process.argv[2]) : process.cwd(); 26 | scaffoldRepo(repoRoot).catch(error => { 27 | console.error('Failed to scaffold repository:', error); 28 | process.exit(1); 29 | }); 30 | } 31 | 32 | export { scaffoldRepo }; 33 | -------------------------------------------------------------------------------- /src/generators/agents/templates/indexTemplate.ts: -------------------------------------------------------------------------------- 1 | import { AgentType } from '../agentTypes'; 2 | import { AGENT_RESPONSIBILITIES } from '../agentConfig'; 3 | 4 | export function renderAgentIndex(agentTypes: readonly AgentType[]): string { 5 | const agentEntries = agentTypes.map(type => { 6 | const title = formatTitle(type); 7 | const primaryResponsibility = AGENT_RESPONSIBILITIES[type]?.[0] || 'Document responsibilities here.'; 8 | return `- [${title}](./${type}.md) — ${primaryResponsibility}`; 9 | }).join('\n'); 10 | 11 | return `# Agent Handbook 12 | 13 | This directory contains ready-to-customize playbooks for AI agents collaborating on the repository. 
14 | 15 | ## Available Agents 16 | ${agentEntries} 17 | 18 | ## How To Use These Playbooks 19 | 1. Pick the agent that matches your task. 20 | 2. Enrich the template with project-specific context or links. 21 | 3. Share the final prompt with your AI assistant. 22 | 4. Capture learnings in the relevant documentation file so future runs improve. 23 | 24 | ## Related Resources 25 | - [Documentation Index](../docs/README.md) 26 | - [Agent Knowledge Base](../../AGENTS.md) 27 | - [Contributor Guidelines](../../CONTRIBUTING.md) 28 | `; 29 | } 30 | 31 | function formatTitle(agentType: string): string { 32 | return agentType 33 | .split('-') 34 | .map(segment => segment.charAt(0).toUpperCase() + segment.slice(1)) 35 | .join(' '); 36 | } 37 | -------------------------------------------------------------------------------- /src/generators/documentation/templates/securityTemplate.ts: -------------------------------------------------------------------------------- 1 | 2 | export function renderSecurity(): string { 3 | 4 | return ` 5 | 6 | # Security & Compliance Notes 7 | 8 | Capture the policies and guardrails that keep this project secure and compliant. 9 | 10 | ## Authentication & Authorization 11 | - Describe identity providers, token formats, session strategies, and role/permission models. 12 | 13 | ## Secrets & Sensitive Data 14 | - Storage locations (vaults, parameter stores), rotation cadence, encryption practices, and data classifications. 15 | 16 | ## Compliance & Policies 17 | - Applicable standards (GDPR, SOC2, HIPAA, internal policies) and evidence requirements. 18 | 19 | ## Incident Response 20 | - On-call contacts, escalation steps, and tooling for detection, triage, and post-incident analysis. 21 | 22 | 23 | ## AI Update Checklist 24 | 1. Confirm security libraries and infrastructure match current deployments. 25 | 2. Update secrets management details when storage or naming changes. 26 | 3. Reflect new compliance obligations or audit findings. 27 | 4. 
Ensure incident response procedures include current contacts and tooling. 28 | 29 | 30 | ## Acceptable Sources 31 | - Security architecture docs, runbooks, policy handbooks. 32 | - IAM/authorization configuration (code or infrastructure). 33 | - Compliance updates from security or legal teams. 34 | 35 | 36 | `; 37 | } 38 | -------------------------------------------------------------------------------- /src/generators/documentation/templates/toolingTemplate.ts: -------------------------------------------------------------------------------- 1 | 2 | export function renderToolingGuide(): string { 3 | 4 | return ` 5 | 6 | # Tooling & Productivity Guide 7 | 8 | Collect the scripts, automation, and editor settings that keep contributors efficient. 9 | 10 | ## Required Tooling 11 | - Tool name — How to install, version requirements, what it powers. 12 | 13 | ## Recommended Automation 14 | - Pre-commit hooks, linting/formatting commands, code generators, or scaffolding scripts. 15 | - Shortcuts or watch modes for local development loops. 16 | 17 | ## IDE / Editor Setup 18 | - Extensions or plugins that catch issues early. 19 | - Snippets, templates, or workspace settings worth sharing. 20 | 21 | ## Productivity Tips 22 | - Terminal aliases, container workflows, or local emulators mirroring production. 23 | - Links to shared scripts or dotfiles used across the team. 24 | 25 | 26 | ## AI Update Checklist 27 | 1. Verify commands align with the latest scripts and build tooling. 28 | 2. Remove instructions for deprecated tools and add replacements. 29 | 3. Highlight automation that saves time during reviews or releases. 30 | 4. Cross-link to runbooks or README sections that provide deeper context. 31 | 32 | 33 | ## Acceptable Sources 34 | - Onboarding docs, internal wikis, and team retrospectives. 35 | - Script directories, package manifests, CI configuration. 36 | - Maintainer recommendations gathered during pairing or code reviews. 
37 | 38 | 39 | `; 40 | } 41 | -------------------------------------------------------------------------------- /src/types.ts: -------------------------------------------------------------------------------- 1 | export interface FileInfo { 2 | path: string; 3 | relativePath: string; 4 | extension: string; 5 | size: number; 6 | content?: string; 7 | type: 'file' | 'directory'; 8 | } 9 | 10 | export interface TopLevelDirectoryStats { 11 | name: string; 12 | fileCount: number; 13 | totalSize: number; 14 | } 15 | 16 | export interface RepoStructure { 17 | rootPath: string; 18 | files: FileInfo[]; 19 | directories: FileInfo[]; 20 | totalFiles: number; 21 | totalSize: number; 22 | topLevelDirectoryStats: TopLevelDirectoryStats[]; 23 | } 24 | 25 | export interface OpenRouterConfig { 26 | apiKey: string; 27 | baseUrl: string; 28 | model: string; 29 | } 30 | 31 | export interface LLMConfig { 32 | apiKey: string; 33 | model: string; 34 | baseUrl?: string; 35 | provider: 'openrouter'; 36 | } 37 | 38 | export interface CLIOptions { 39 | repoPath: string; 40 | outputDir?: string; 41 | model?: string; 42 | apiKey?: string; 43 | provider?: LLMConfig['provider']; 44 | exclude?: string[]; 45 | include?: string[]; 46 | verbose?: boolean; 47 | since?: string; 48 | staged?: boolean; 49 | force?: boolean; 50 | } 51 | 52 | export interface AgentPrompt { 53 | name: string; 54 | description: string; 55 | systemPrompt: string; 56 | context: string; 57 | examples?: string[]; 58 | } 59 | 60 | export interface TokenUsage { 61 | promptTokens: number; 62 | completionTokens: number; 63 | totalTokens: number; 64 | } 65 | 66 | export interface UsageStats { 67 | totalCalls: number; 68 | totalPromptTokens: number; 69 | totalCompletionTokens: number; 70 | totalTokens: number; 71 | model: string; 72 | } 73 | -------------------------------------------------------------------------------- /src/generators/documentation/templates/developmentWorkflowTemplate.ts: 
-------------------------------------------------------------------------------- 1 | export function renderDevelopmentWorkflow(): string { 2 | return ` 3 | # Development Workflow 4 | 5 | Outline the day-to-day engineering process for this repository. 6 | 7 | ## Branching & Releases 8 | - Describe the branching model (e.g., trunk-based, Git Flow). 9 | - Note release cadence and tagging conventions. 10 | 11 | ## Local Development 12 | - Commands to install dependencies: \`npm install\` 13 | - Run the CLI locally: \`npm run dev\` 14 | - Build for distribution: \`npm run build\` 15 | 16 | ## Code Review Expectations 17 | - Summarize review checklists and required approvals. 18 | - Reference [AGENTS.md](../../AGENTS.md) for agent collaboration tips. 19 | 20 | ## Onboarding Tasks 21 | - Point newcomers to first issues or starter tickets. 22 | - Link to internal runbooks or dashboards. 23 | 24 | 25 | ## AI Update Checklist 26 | 1. Confirm branching/release steps with CI configuration and recent tags. 27 | 2. Verify local commands against \`package.json\`; ensure flags and scripts still exist. 28 | 3. Capture review requirements (approvers, checks) from contributing docs or repository settings. 29 | 4. Refresh onboarding links (boards, dashboards) to their latest URLs. 30 | 5. Highlight any manual steps that should become automation follow-ups. 31 | 32 | 33 | ## Acceptable Sources 34 | - CONTRIBUTING guidelines and \`AGENTS.md\`. 35 | - Build pipelines, branch protection rules, or release scripts. 36 | - Issue tracker boards used for onboarding or triage. 37 | 38 | 39 | `; 40 | } 41 | -------------------------------------------------------------------------------- /src/generators/documentation/templates/testingTemplate.ts: -------------------------------------------------------------------------------- 1 | 2 | export function renderTestingStrategy(): string { 3 | 4 | return ` 5 | 6 | # Testing Strategy 7 | 8 | Document how quality is maintained across the codebase. 
9 | 10 | ## Test Types 11 | - Unit: List frameworks (e.g., Jest) and file naming conventions. 12 | - Integration: Describe scenarios and required tooling. 13 | - End-to-end: Note harnesses or environments if applicable. 14 | 15 | ## Running Tests 16 | - Execute all tests with \`npm run test\`. 17 | - Use watch mode locally: \`npm run test -- --watch\`. 18 | - Add coverage runs before releases: \`npm run test -- --coverage\`. 19 | 20 | ## Quality Gates 21 | - Define minimum coverage expectations. 22 | - Capture linting or formatting requirements before merging. 23 | 24 | ## Troubleshooting 25 | - Document flaky suites, long-running tests, or environment quirks. 26 | 27 | 28 | ## AI Update Checklist 29 | 1. Review test scripts and CI workflows to confirm command accuracy. 30 | 2. Update Quality Gates with current thresholds (coverage %, lint rules, required checks). 31 | 3. Document new test categories or suites introduced since the last update. 32 | 4. Record known flaky areas and link to open issues for visibility. 33 | 5. Confirm troubleshooting steps remain valid with current tooling. 34 | 35 | 36 | ## Acceptable Sources 37 | - \`package.json\` scripts and testing configuration files. 38 | - CI job definitions (GitHub Actions, CircleCI, etc.). 39 | - Issue tracker items labelled “testing” or “flaky” with maintainer confirmation. 
40 | 41 | 42 | `; 43 | } 44 | -------------------------------------------------------------------------------- /src/services/openRouterClient.ts: -------------------------------------------------------------------------------- 1 | import axios, { AxiosInstance } from 'axios'; 2 | import { OpenRouterConfig } from '../types'; 3 | import { BaseLLMClient } from './baseLLMClient'; 4 | 5 | export class OpenRouterClient extends BaseLLMClient { 6 | private client: AxiosInstance; 7 | private config: OpenRouterConfig; 8 | 9 | constructor(config: OpenRouterConfig) { 10 | super(config.model); 11 | this.config = config; 12 | this.client = axios.create({ 13 | baseURL: config.baseUrl || 'https://openrouter.ai/api/v1', 14 | headers: { 15 | 'Authorization': `Bearer ${config.apiKey}`, 16 | 'Content-Type': 'application/json', 17 | 'HTTP-Referer': 'https://ai-coders-context', 18 | 'X-Title': 'AI Coders Context' 19 | } 20 | }); 21 | } 22 | 23 | async generateText(prompt: string, systemPrompt?: string): Promise<string> { 24 | try { 25 | const messages = []; 26 | 27 | if (systemPrompt) { 28 | messages.push({ role: 'system', content: systemPrompt }); 29 | } 30 | 31 | messages.push({ role: 'user', content: prompt }); 32 | 33 | const response = await this.client.post('/chat/completions', { 34 | model: this.config.model || 'x-ai/grok-4-fast', 35 | messages, 36 | max_tokens: 4000, 37 | temperature: 0.7 38 | }); 39 | 40 | // Track usage statistics 41 | this.trackUsage(response.data.usage); 42 | 43 | return response.data.choices[0]?.message?.content || ''; 44 | } catch (error) { 45 | if (axios.isAxiosError(error)) { 46 | throw new Error(`OpenRouter API error: ${error.response?.data?.error?.message || error.message}`); 47 | } 48 | throw error; 49 | } 50 | } 51 | 52 | } 53 | -------------------------------------------------------------------------------- /src/generators/documentation/templates/dataFlowTemplate.ts: -------------------------------------------------------------------------------- 1 | import
{ DocumentationTemplateContext } from './types'; 2 | import { formatInlineDirectoryList } from './common'; 3 | 4 | export function renderDataFlow(context: DocumentationTemplateContext): string { 5 | return ` 6 | # Data Flow & Integrations 7 | 8 | Explain how data enters, moves through, and exits the system, including interactions with external services. 9 | 10 | ## High-level Flow 11 | - Summarize the primary pipeline from input to output. Reference diagrams or embed Mermaid definitions when available. 12 | 13 | ## Internal Movement 14 | - Describe how modules within ${formatInlineDirectoryList(context.topLevelDirectories)} collaborate (queues, events, RPC calls, shared databases). 15 | 16 | ## External Integrations 17 | - **Integration** — Purpose, authentication, payload shapes, retry strategy. 18 | 19 | ## Observability & Failure Modes 20 | - Metrics, traces, or logs that monitor the flow. 21 | - Backoff, dead-letter, or compensating actions when downstream systems fail. 22 | 23 | 24 | ## AI Update Checklist 25 | 1. Validate flows against the latest integration contracts or diagrams. 26 | 2. Update authentication, scopes, or rate limits when they change. 27 | 3. Capture recent incidents or lessons learned that influenced reliability. 28 | 4. Link to runbooks or dashboards used during triage. 29 | 30 | 31 | ## Acceptable Sources 32 | - Architecture diagrams, ADRs, integration playbooks. 33 | - API specs, queue/topic definitions, infrastructure code. 34 | - Postmortems or incident reviews impacting data movement. 
import { buildDocumentMapTable, formatDirectoryList } from './common';
import { DocumentationTemplateContext } from './types';

/**
 * Renders the Markdown body of the documentation index page (docs/README.md).
 *
 * Three dynamic sections are computed from the template context:
 * - a navigation list with one link per selected guide,
 * - a top-level directory snapshot (rendered without TODO placeholders),
 * - the "document map" table produced by `buildDocumentMapTable`.
 *
 * NOTE(review): the bare blank lines inside the template literal look like
 * spots where `<!-- agent-update:... -->` markers were lost (guideRegistry.ts
 * declares such markers for every guide and the checklist below refers to
 * them) — confirm against the original file before relying on this output.
 *
 * @param context Aggregated repository and guide metadata from the generator.
 * @returns The complete index document as a single Markdown string.
 */
export function renderIndex(context: DocumentationTemplateContext): string {

  // `false` = no "TODO: describe this directory" placeholders on the index page.
  const directoryList = formatDirectoryList(context, false);
  // | Guide | File | AI Marker | Primary Inputs | table covering every guide.
  const documentMap = buildDocumentMapTable(context.guides);
  // One "- [Title](./file)" bullet per guide; `join` on an empty guide list
  // yields '' so `||` deliberately substitutes the italic placeholder.
  const navigationList = context.guides
    .map(guide => `- [${guide.title}](./${guide.file})`)
    .join('\n') || '- *No guides selected.*';

  return `

# Documentation Index

Welcome to the repository knowledge base. Start with the project overview, then dive into specific guides as needed.

## Core Guides
${navigationList}

## Repository Snapshot
${directoryList || '*Top-level directories will appear here once the repository contains subfolders.*'}

## Document Map
${documentMap}


## AI Update Checklist
1. Gather context with \`git status -sb\` plus the latest commits touching \`docs/\` or \`agents/\`.
2. Compare the current directory tree against the table above; add or retire rows accordingly.
3. Update cross-links if guides moved or were renamed; keep anchor text concise.
4. Record sources consulted inside the commit or PR description for traceability.


## Acceptable Sources
- Repository tree and \`package.json\` scripts for canonical command names.
- Maintainer-approved issues, RFCs, or product briefs referenced in the repo.
- Release notes or changelog entries that announce documentation changes.


`;
}
37 | 38 | 39 | `; 40 | } 41 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push: 5 | branches: [ main, develop ] 6 | pull_request: 7 | branches: [ main ] 8 | 9 | jobs: 10 | test: 11 | runs-on: ubuntu-latest 12 | 13 | strategy: 14 | matrix: 15 | node-version: [20.x, 22.x, 23.x, 24.x] 16 | 17 | steps: 18 | - uses: actions/checkout@v4 19 | 20 | - name: Use Node.js ${{ matrix.node-version }} 21 | uses: actions/setup-node@v4 22 | with: 23 | node-version: ${{ matrix.node-version }} 24 | cache: 'npm' 25 | 26 | - name: Install dependencies 27 | run: npm ci 28 | 29 | - name: Build 30 | run: npm run build 31 | 32 | - name: Run Tests 33 | run: npm test 34 | 35 | - name: Test CLI 36 | run: | 37 | node dist/index.js --help 38 | node dist/index.js analyze --help 39 | node dist/index.js init --help 40 | node dist/index.js update --help 41 | node dist/index.js preview --help 42 | 43 | build-and-test: 44 | runs-on: ${{ matrix.os }} 45 | 46 | strategy: 47 | matrix: 48 | os: [ubuntu-latest, windows-latest, macos-latest] 49 | node-version: [20.x, 22.x, 23.x, 24.x] 50 | 51 | steps: 52 | - uses: actions/checkout@v4 53 | 54 | - name: Use Node.js ${{ matrix.node-version }} 55 | uses: actions/setup-node@v4 56 | with: 57 | node-version: ${{ matrix.node-version }} 58 | cache: 'npm' 59 | 60 | - name: Install dependencies 61 | run: npm ci 62 | 63 | - name: Build 64 | run: npm run build 65 | 66 | - name: Upload build artifacts 67 | uses: actions/upload-artifact@v4 68 | with: 69 | name: dist-${{ matrix.os }}-${{ matrix.node-version }} 70 | path: dist/ 71 | overwrite: true 72 | -------------------------------------------------------------------------------- /src/services/shared/llmConfig.ts: -------------------------------------------------------------------------------- 1 | import type { TranslateFn } from '../../utils/i18n'; 
2 | import type { LLMConfig } from '../../types'; 3 | import { LLMClientFactory } from '../llmClientFactory'; 4 | 5 | export interface ResolvedLlmConfig { 6 | provider: LLMConfig['provider']; 7 | model: string; 8 | apiKey: string; 9 | baseUrl?: string; 10 | } 11 | 12 | export interface ResolveLlmConfigOptions { 13 | rawOptions: { 14 | provider?: LLMConfig['provider']; 15 | model?: string; 16 | apiKey?: string; 17 | baseUrl?: string; 18 | }; 19 | fallbackModel: string; 20 | t: TranslateFn; 21 | factory?: typeof LLMClientFactory; 22 | } 23 | 24 | export async function resolveLlmConfig({ 25 | rawOptions, 26 | fallbackModel, 27 | t, 28 | factory = LLMClientFactory 29 | }: ResolveLlmConfigOptions): Promise { 30 | const envVars = factory.getEnvironmentVariables(); 31 | const provider: LLMConfig['provider'] = 'openrouter'; 32 | 33 | // Get API key from options or environment 34 | let apiKey = rawOptions.apiKey; 35 | if (!apiKey) { 36 | for (const envVar of envVars) { 37 | const value = process.env[envVar]; 38 | if (value) { 39 | apiKey = value; 40 | break; 41 | } 42 | } 43 | } 44 | 45 | // Get model from options, environment, or defaults 46 | let model = rawOptions.model; 47 | if (!model) { 48 | model = process.env.OPENROUTER_MODEL || factory.getDefaultModel() || fallbackModel; 49 | } 50 | 51 | // Validate API key exists 52 | if (!apiKey) { 53 | throw new Error( 54 | t('errors.fill.apiKeyMissing', { 55 | provider: provider.toUpperCase(), 56 | envVars: envVars.join(', ') 57 | }) 58 | ); 59 | } 60 | 61 | return { 62 | provider, 63 | model, 64 | apiKey, 65 | baseUrl: rawOptions.baseUrl 66 | }; 67 | } 68 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to @ai-coders/context 2 | 3 | We love your input! We want to make contributing to this project as easy and transparent as possible. 
4 | 5 | ## Development Process 6 | 7 | We use GitHub to host code, to track issues and feature requests, as well as accept pull requests. 8 | 9 | 1. Fork the repo and create your branch from `main`. 10 | 2. If you've added code that should be tested, add tests. 11 | 3. If you've changed APIs, update the documentation. 12 | 4. Ensure the test suite passes. 13 | 5. Make sure your code lints. 14 | 6. Issue that pull request! 15 | 16 | ## Setting Up Development Environment 17 | 18 | ```bash 19 | # Clone your fork 20 | git clone https://github.com/YOUR_USERNAME/ai-coders-context.git 21 | cd ai-coders-context 22 | 23 | # Install dependencies 24 | npm install 25 | 26 | # Run in development mode 27 | npm run dev 28 | 29 | # Build the project 30 | npm run build 31 | 32 | # Run tests 33 | npm test 34 | ``` 35 | 36 | ## Pull Request Process 37 | 38 | 1. Update the README.md with details of changes to the interface, if applicable. 39 | 2. Update the package.json version following [SemVer](http://semver.org/). 40 | 3. The PR will be merged once you have the sign-off of at least one maintainer. 41 | 42 | ## Any contributions you make will be under the MIT Software License 43 | 44 | When you submit code changes, your submissions are understood to be under the same [MIT License](LICENSE) that covers the project. 45 | 46 | ## Report bugs using GitHub's issues 47 | 48 | We use GitHub issues to track public bugs. Report a bug by [opening a new issue](https://github.com/vinilana/ai-coders-context/issues/new). 49 | 50 | ## Write bug reports with detail, background, and sample code 51 | 52 | **Great Bug Reports** tend to have: 53 | 54 | - A quick summary and/or background 55 | - Steps to reproduce 56 | - Be specific! 
57 | - Give sample code if you can 58 | - What you expected would happen 59 | - What actually happens 60 | - Notes (possibly including why you think this might be happening, or stuff you tried that didn't work) 61 | 62 | ## License 63 | 64 | By contributing, you agree that your contributions will be licensed under its MIT License. -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@ai-coders/context", 3 | "version": "0.4.0", 4 | "description": "CLI tool for generating codebase documentation and AI agent prompts", 5 | "main": "dist/index.js", 6 | "bin": { 7 | "ai-context": "dist/index.js" 8 | }, 9 | "files": [ 10 | "dist/**/*", 11 | "README.md", 12 | "LICENSE", 13 | "prompts/**/*" 14 | ], 15 | "engines": { 16 | "node": ">=20.0.0" 17 | }, 18 | "scripts": { 19 | "build": "tsc", 20 | "dev": "tsx src/index.ts", 21 | "start": "node dist/index.js", 22 | "test": "jest", 23 | "prepublishOnly": "npm run build", 24 | "version": "npm run build", 25 | "release": "npm version patch && npm publish --access public", 26 | "release:minor": "npm version minor && npm publish --access public", 27 | "release:major": "npm version major && npm publish --access public" 28 | }, 29 | "publishConfig": { 30 | "access": "public", 31 | "registry": "https://registry.npmjs.org/" 32 | }, 33 | "repository": { 34 | "type": "git", 35 | "url": "git+https://github.com/vinilana/ai-coders-context.git" 36 | }, 37 | "homepage": "https://github.com/vinilana/ai-coders-context#readme", 38 | "bugs": { 39 | "url": "https://github.com/vinilana/ai-coders-context/issues" 40 | }, 41 | "keywords": [ 42 | "cli", 43 | "documentation", 44 | "agents", 45 | "scaffold" 46 | ], 47 | "author": "", 48 | "license": "MIT", 49 | "dependencies": { 50 | "axios": "^1.12.2", 51 | "boxen": "^5.1.2", 52 | "chalk": "^4.1.2", 53 | "cli-progress": "^3.12.0", 54 | "commander": "^14.0.1", 55 | 
"dotenv": "^17.2.2", 56 | "figures": "^3.2.0", 57 | "fs-extra": "^11.3.2", 58 | "glob": "^10.4.5", 59 | "inquirer": "^12.6.3", 60 | "ora": "^5.4.1", 61 | "semver": "^7.6.3" 62 | }, 63 | "devDependencies": { 64 | "@types/cli-progress": "^3.11.0", 65 | "@types/fs-extra": "^11.0.4", 66 | "@types/inquirer": "^9.0.8", 67 | "@types/jest": "^30.0.0", 68 | "@types/node": "^24.5.2", 69 | "@types/semver": "^7.5.8", 70 | "jest": "^30.1.3", 71 | "ts-jest": "^29.4.4", 72 | "tsx": "^4.20.6", 73 | "typescript": "^5.9.2" 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /src/generators/documentation/templates/common.ts: -------------------------------------------------------------------------------- 1 | import { DirectoryStat, DocumentationTemplateContext } from './types'; 2 | 3 | const KNOWN_DESCRIPTIONS: Record = { 4 | src: 'TypeScript source files and CLI entrypoints.', 5 | dist: 'Compiled JavaScript output generated by the build step.', 6 | docs: 'Living documentation produced by this tool.', 7 | agents: 'AI agent playbooks and prompts.', 8 | tests: 'Automated tests and fixtures.', 9 | packages: 'Workspace packages or modules.' 
10 | }; 11 | 12 | export function formatDirectoryList( 13 | context: DocumentationTemplateContext, 14 | includePlaceholders: boolean 15 | ): string { 16 | if (context.topLevelDirectories.length === 0) { 17 | return ''; 18 | } 19 | 20 | return context.topLevelDirectories 21 | .map(dir => { 22 | const description = KNOWN_DESCRIPTIONS[dir]; 23 | if (description) { 24 | return `- \`${dir}/\` — ${description}`; 25 | } 26 | 27 | if (!includePlaceholders) { 28 | return `- \`${dir}/\``; 29 | } 30 | 31 | const slotId = slugify(dir); 32 | return `- \`${dir}/\` — TODO: Describe the purpose of this directory.`; 33 | }) 34 | .join('\n'); 35 | } 36 | 37 | export function buildDocumentMapTable(guides: DocumentationTemplateContext['guides']): string { 38 | const rows = guides.map(meta => `| ${meta.title} | \`${meta.file}\` | ${meta.marker} | ${meta.primaryInputs} |`); 39 | return ['| Guide | File | AI Marker | Primary Inputs |', '| --- | --- | --- | --- |', ...rows].join('\n'); 40 | } 41 | 42 | export function formatDirectoryStats(stats: DirectoryStat[]): string { 43 | if (!stats.length) { 44 | return '*No directories detected.*'; 45 | } 46 | 47 | return stats 48 | .map(stat => `- \`${stat.name}/\` — approximately ${stat.fileCount} files`) 49 | .join('\n'); 50 | } 51 | 52 | export function formatInlineDirectoryList(directories: string[]): string { 53 | if (!directories.length) { 54 | return '`n/a`'; 55 | } 56 | 57 | return directories.map(dir => `\`${dir}\``).join(', '); 58 | } 59 | 60 | export function slugify(value: string): string { 61 | return value 62 | .toLowerCase() 63 | .replace(/[^a-z0-9]+/g, '-') 64 | .replace(/^-+|-+$/g, ''); 65 | } 66 | -------------------------------------------------------------------------------- /src/generators/plans/templates/indexTemplate.ts: -------------------------------------------------------------------------------- 1 | import { PlanIndexEntry } from './types'; 2 | 3 | export function renderPlanIndex(entries: PlanIndexEntry[]): string { 4 | 
const planList = entries.length 5 | ? entries 6 | .map((entry, index) => `${index + 1}. [${entry.title}](./${entry.slug}.md)`) 7 | .join('\n') 8 | : '_No plans created yet. Use "ai-context plan " to create the first one._'; 9 | 10 | return `# Collaboration Plans 11 | 12 | This directory is the run queue for AI agents and maintainers coordinating work across documentation and playbooks. Treat the list below as an ordered backlog: finish the first plan before moving on to the next unless a human directs otherwise. 13 | 14 | ## Agent Execution Protocol 15 | 1. **Read the queue** from top to bottom. The numbering reflects execution priority. 16 | 2. **Open the plan file** (e.g., './plans/.md') and review the YAML front matter and the '' wrapper so you understand the goal, required inputs, and success criteria. 17 | 3. **Gather context** by visiting the linked documentation and agent playbooks referenced in the "Agent Lineup" and "Documentation Touchpoints" tables. 18 | 4. **Execute the stages** exactly as written, capturing evidence and updating linked docs as instructed. If a stage cannot be completed, record the reason inside the plan before pausing. 19 | 5. **Close out the plan** by updating any TODOs, recording outcomes in the "Evidence & Follow-up" section, and notifying maintainers if human review is required. 20 | 6. **Return here** and pick the next plan in the queue. Always leave the README and plan files consistent with the work performed. 21 | 22 | ## Plan Queue (process in order) 23 | ${planList} 24 | 25 | ## How To Create Or Update Plans 26 | - Run "ai-context plan " to scaffold a new plan template. 27 | - Run "ai-context plan --fill" (optionally with "--dry-run") to have an LLM refresh the plan using the latest repository context. 28 | - Cross-link any new documentation or agent resources you introduce so future runs stay discoverable. 
29 | 30 | ## Related Resources 31 | - [Agent Handbook](../agents/README.md) 32 | - [Documentation Index](../docs/README.md) 33 | - [Agent Knowledge Base](../../AGENTS.md) 34 | - [Contributor Guidelines](../../CONTRIBUTING.md) 35 | `; 36 | } 37 | -------------------------------------------------------------------------------- /src/utils/versionChecker.ts: -------------------------------------------------------------------------------- 1 | import axios from 'axios'; 2 | import { gt } from 'semver'; 3 | 4 | import type { CLIInterface } from './cliUI'; 5 | import type { TranslateFn } from './i18n'; 6 | 7 | interface VersionCheckOptions { 8 | packageName: string; 9 | currentVersion: string; 10 | ui: Pick; 11 | t: TranslateFn; 12 | registryTimeoutMs?: number; 13 | fetcher?: (packageName: string, timeoutMs: number) => Promise; 14 | updateCommand?: string; 15 | force?: boolean; 16 | } 17 | 18 | const DEFAULT_TIMEOUT_MS = 2000; 19 | 20 | const DISABLE_ENV_FLAGS = ['AI_CONTEXT_DISABLE_UPDATE_CHECK', 'NO_UPDATE_NOTIFIER']; 21 | 22 | async function fetchLatestVersion(packageName: string, timeoutMs: number): Promise { 23 | const url = `https://registry.npmjs.org/${encodeURIComponent(packageName)}/latest`; 24 | const response = await axios.get<{ version?: string }>(url, { timeout: timeoutMs }); 25 | if (!response.data?.version) { 26 | throw new Error('missing-version'); 27 | } 28 | return response.data.version; 29 | } 30 | 31 | export async function checkForUpdates(options: VersionCheckOptions): Promise { 32 | const { 33 | packageName, 34 | currentVersion, 35 | ui, 36 | t, 37 | registryTimeoutMs = DEFAULT_TIMEOUT_MS, 38 | fetcher, 39 | updateCommand, 40 | force = false 41 | } = options; 42 | 43 | if (!force) { 44 | if (process.env.NODE_ENV === 'test' || process.env.CI === 'true') { 45 | return; 46 | } 47 | 48 | for (const envFlag of DISABLE_ENV_FLAGS) { 49 | const value = process.env[envFlag]; 50 | if (typeof value === 'string' && value.toLowerCase() !== 'false') { 51 | return; 
52 | } 53 | } 54 | } 55 | 56 | try { 57 | const latestVersion = await (fetcher 58 | ? fetcher(packageName, registryTimeoutMs) 59 | : fetchLatestVersion(packageName, registryTimeoutMs)); 60 | 61 | if (gt(latestVersion, currentVersion)) { 62 | const command = updateCommand ?? `npm install -g ${packageName}`; 63 | ui.displayInfo( 64 | t('info.update.available.title'), 65 | t('info.update.available.detail', { 66 | latest: latestVersion, 67 | current: currentVersion, 68 | command 69 | }) 70 | ); 71 | } 72 | } catch (error) { 73 | // Swallow errors silently; update hints should never block the CLI. 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /src/utils/versionChecker.test.ts: -------------------------------------------------------------------------------- 1 | import { checkForUpdates } from './versionChecker'; 2 | import type { TranslateFn, TranslateParams } from './i18n'; 3 | 4 | describe('versionChecker', () => { 5 | const ui = { 6 | displayInfo: jest.fn() 7 | }; 8 | 9 | const t: TranslateFn = (key: string, params?: TranslateParams) => { 10 | const context = (params ?? {}) as Record; 11 | const latest = context.latest ?? ''; 12 | const current = context.current ?? ''; 13 | const command = context.command ?? 
''; 14 | switch (key) { 15 | case 'info.update.available.title': 16 | return 'Update available'; 17 | case 'info.update.available.detail': 18 | return `Latest: ${latest}, current: ${current}, update with: ${command}`; 19 | default: 20 | return key; 21 | } 22 | }; 23 | 24 | beforeEach(() => { 25 | jest.clearAllMocks(); 26 | }); 27 | 28 | it('informs the user when a newer version exists', async () => { 29 | await checkForUpdates({ 30 | packageName: '@scope/test', 31 | currentVersion: '0.1.0', 32 | ui, 33 | t, 34 | fetcher: async (pkgName: string, timeoutMs: number) => { 35 | expect(pkgName).toBe('@scope/test'); 36 | expect(timeoutMs).toBeGreaterThan(0); 37 | return '0.2.0'; 38 | }, 39 | updateCommand: 'npm i -g @scope/test', 40 | force: true 41 | }); 42 | 43 | expect(ui.displayInfo).toHaveBeenCalledWith( 44 | 'Update available', 45 | 'Latest: 0.2.0, current: 0.1.0, update with: npm i -g @scope/test' 46 | ); 47 | }); 48 | 49 | it('does not notify when already on latest version', async () => { 50 | await checkForUpdates({ 51 | packageName: 'pkg', 52 | currentVersion: '1.0.0', 53 | ui, 54 | t, 55 | fetcher: async (_packageName, _timeout) => '1.0.0', 56 | force: true 57 | }); 58 | 59 | expect(ui.displayInfo).not.toHaveBeenCalled(); 60 | }); 61 | 62 | it('silently ignores registry failures', async () => { 63 | await expect( 64 | checkForUpdates({ 65 | packageName: 'pkg', 66 | currentVersion: '1.0.0', 67 | ui, 68 | t, 69 | fetcher: async (_packageName, _timeout) => { 70 | throw new Error('network'); 71 | }, 72 | force: true 73 | }) 74 | ).resolves.toBeUndefined(); 75 | 76 | expect(ui.displayInfo).not.toHaveBeenCalled(); 77 | }); 78 | }); 79 | -------------------------------------------------------------------------------- /prompts/update_plan_prompt.md: -------------------------------------------------------------------------------- 1 | # Prompt: Update Collaboration Plans 2 | 3 | ## Purpose 4 | You are an AI assistant responsible for refining collaboration plans that live 
in the `.context/plans/` directory. Each plan orchestrates work across documentation guides (`docs/`) and agent playbooks (`agents/`). Your goal is to replace placeholders with actionable guidance that keeps the plan aligned with the referenced docs, agents, and repository context. 5 | 6 | ## Preparation Checklist 7 | 1. Review the plan’s YAML front matter to understand the stated `ai_update_goal`, `required_inputs`, and `success_criteria`. 8 | 2. Inspect the provided documentation excerpts (from `docs/`) and agent playbooks to ensure the plan reflects their current guidance. 9 | 3. Confirm that the “Agent Lineup” and “Documentation Touchpoints” tables link to real files and reference the correct `agent-update` markers. 10 | 4. Note any TODOs, `agent-fill` placeholders, or missing evidence sections that must be resolved. 11 | 12 | ## Update Procedure 13 | 1. **Task Snapshot** 14 | - Summarize the primary goal and success signal in concrete terms. 15 | - List authoritative references (docs, issues, specs) that contributors should consult. 16 | 17 | 2. **Agent Alignment** 18 | - For each agent in the lineup, describe why they are involved and call out the first responsibility they should focus on. 19 | - Ensure playbook links and responsibility summaries match the referenced agent files. 20 | 21 | 3. **Documentation Touchpoints** 22 | - Map each plan stage to the docs excerpts provided, highlighting which sections need to be updated during execution. 23 | - Keep the table sorted and ensure the listed `agent-update` markers exist. 24 | 25 | 4. **Working Stages** 26 | - Break the work into clear stages with owners, deliverables, and evidence checkpoints. 27 | - Reference documentation and agent resources that the team should consult while executing each stage. 28 | 29 | 5. **Evidence & Follow-up** 30 | - Specify the artefacts that must be captured (PR links, test runs, change logs) before the plan is considered complete. 
31 | - Record any follow-up actions or decisions that require human confirmation. 32 | 33 | ## Acceptance Criteria 34 | - Every TODO or placeholder inside the plan’s `agent-update` block is resolved or accompanied by a clear escalation note. 35 | - Tables reference existing files and stay in sync with the docs/agent indices. 36 | - Stages provide actionable guidance, owners, and success signals. 37 | - The plan remains fully self-contained and ready for contributors to execute. 38 | 39 | ## Deliverables 40 | - Updated plan Markdown returned verbatim. 41 | - No additional commentary outside the Markdown output. 42 | -------------------------------------------------------------------------------- /src/utils/promptLoader.ts: -------------------------------------------------------------------------------- 1 | import * as path from 'path'; 2 | import * as fs from 'fs-extra'; 3 | 4 | import { UPDATE_PLAN_PROMPT_FALLBACK, UPDATE_SCAFFOLD_PROMPT_FALLBACK } from '../prompts/defaults'; 5 | 6 | type FileSystem = typeof fs; 7 | 8 | export type PromptSource = 'custom' | 'package' | 'builtin'; 9 | 10 | export interface PromptResolution { 11 | content: string; 12 | source: PromptSource; 13 | path?: string; 14 | } 15 | 16 | interface ResolvePromptOptions { 17 | customPath?: string; 18 | fallbackFileName: string; 19 | fallbackContent: string; 20 | missingCustomPromptMessage: (resolvedPath: string) => string; 21 | fsModule?: FileSystem; 22 | } 23 | 24 | async function resolvePrompt({ 25 | customPath, 26 | fallbackFileName, 27 | fallbackContent, 28 | missingCustomPromptMessage, 29 | fsModule 30 | }: ResolvePromptOptions): Promise { 31 | const fileSystem = fsModule ?? 
fs; 32 | 33 | if (customPath) { 34 | const resolvedCustomPath = path.resolve(customPath); 35 | 36 | if (!(await fileSystem.pathExists(resolvedCustomPath))) { 37 | throw new Error(missingCustomPromptMessage(resolvedCustomPath)); 38 | } 39 | 40 | const content = await fileSystem.readFile(resolvedCustomPath, 'utf-8'); 41 | return { content, source: 'custom', path: resolvedCustomPath }; 42 | } 43 | 44 | const candidatePaths = [ 45 | path.resolve(__dirname, '../../prompts', fallbackFileName), 46 | path.resolve(__dirname, '../prompts', fallbackFileName) 47 | ]; 48 | 49 | for (const candidatePath of candidatePaths) { 50 | if (await fileSystem.pathExists(candidatePath)) { 51 | const content = await fileSystem.readFile(candidatePath, 'utf-8'); 52 | return { content, source: 'package', path: candidatePath }; 53 | } 54 | } 55 | 56 | return { content: fallbackContent, source: 'builtin' }; 57 | } 58 | 59 | export function resolveScaffoldPrompt( 60 | customPath: string | undefined, 61 | missingCustomPromptMessage: (resolvedPath: string) => string, 62 | fsModule?: FileSystem 63 | ): Promise { 64 | return resolvePrompt({ 65 | customPath, 66 | fallbackFileName: 'update_scaffold_prompt.md', 67 | fallbackContent: UPDATE_SCAFFOLD_PROMPT_FALLBACK, 68 | missingCustomPromptMessage, 69 | fsModule 70 | }); 71 | } 72 | 73 | export function resolvePlanPrompt( 74 | customPath: string | undefined, 75 | missingCustomPromptMessage: (resolvedPath: string) => string, 76 | fsModule?: FileSystem 77 | ): Promise { 78 | return resolvePrompt({ 79 | customPath, 80 | fallbackFileName: 'update_plan_prompt.md', 81 | fallbackContent: UPDATE_PLAN_PROMPT_FALLBACK, 82 | missingCustomPromptMessage, 83 | fsModule 84 | }); 85 | } 86 | -------------------------------------------------------------------------------- /src/generators/documentation/guideRegistry.ts: -------------------------------------------------------------------------------- 1 | import { GuideMeta } from './templates/types'; 2 | 3 | export const 
DOCUMENT_GUIDES: GuideMeta[] = [ 4 | { 5 | key: 'project-overview', 6 | title: 'Project Overview', 7 | file: 'project-overview.md', 8 | marker: 'agent-update:project-overview', 9 | primaryInputs: 'Roadmap, README, stakeholder notes' 10 | }, 11 | { 12 | key: 'architecture', 13 | title: 'Architecture Notes', 14 | file: 'architecture.md', 15 | marker: 'agent-update:architecture-notes', 16 | primaryInputs: 'ADRs, service boundaries, dependency graphs' 17 | }, 18 | { 19 | key: 'development-workflow', 20 | title: 'Development Workflow', 21 | file: 'development-workflow.md', 22 | marker: 'agent-update:development-workflow', 23 | primaryInputs: 'Branching rules, CI config, contributing guide' 24 | }, 25 | { 26 | key: 'testing-strategy', 27 | title: 'Testing Strategy', 28 | file: 'testing-strategy.md', 29 | marker: 'agent-update:testing-strategy', 30 | primaryInputs: 'Test configs, CI gates, known flaky suites' 31 | }, 32 | { 33 | key: 'glossary', 34 | title: 'Glossary & Domain Concepts', 35 | file: 'glossary.md', 36 | marker: 'agent-update:glossary', 37 | primaryInputs: 'Business terminology, user personas, domain rules' 38 | }, 39 | { 40 | key: 'data-flow', 41 | title: 'Data Flow & Integrations', 42 | file: 'data-flow.md', 43 | marker: 'agent-update:data-flow', 44 | primaryInputs: 'System diagrams, integration specs, queue topics' 45 | }, 46 | { 47 | key: 'security', 48 | title: 'Security & Compliance Notes', 49 | file: 'security.md', 50 | marker: 'agent-update:security', 51 | primaryInputs: 'Auth model, secrets management, compliance requirements' 52 | }, 53 | { 54 | key: 'tooling', 55 | title: 'Tooling & Productivity Guide', 56 | file: 'tooling.md', 57 | marker: 'agent-update:tooling', 58 | primaryInputs: 'CLI scripts, IDE configs, automation workflows' 59 | } 60 | ]; 61 | 62 | export const DOCUMENT_GUIDE_KEYS = DOCUMENT_GUIDES.map(guide => guide.key); 63 | 64 | export function getGuidesByKeys(keys?: string[]): GuideMeta[] { 65 | if (!keys || keys.length === 0) { 66 | 
return DOCUMENT_GUIDES; 67 | } 68 | 69 | const set = new Set(keys); 70 | const filtered = DOCUMENT_GUIDES.filter(guide => set.has(guide.key)); 71 | return filtered.length > 0 ? filtered : DOCUMENT_GUIDES; 72 | } 73 | 74 | export function getDocFilesByKeys(keys?: string[]): Set | undefined { 75 | if (!keys || keys.length === 0) { 76 | return undefined; 77 | } 78 | const files = DOCUMENT_GUIDES 79 | .filter(guide => keys.includes(guide.key)) 80 | .map(guide => guide.file); 81 | return files.length ? new Set(files) : undefined; 82 | } 83 | -------------------------------------------------------------------------------- /src/generators/documentation/templates/projectOverviewTemplate.ts: -------------------------------------------------------------------------------- 1 | import { DocumentationTemplateContext } from './types'; 2 | import { formatDirectoryList } from './common'; 3 | 4 | export function renderProjectOverview(context: DocumentationTemplateContext): string { 5 | 6 | const directoryList = formatDirectoryList(context, true); 7 | const languageSummary = context.primaryLanguages.length > 0 8 | ? context.primaryLanguages.map(lang => `- ${lang.extension} (${lang.count} files)`).join('\n') 9 | : '- Language mix pending analysis.'; 10 | 11 | return ` 12 | 13 | # Project Overview 14 | 15 | > TODO: Summarize the problem this project solves and who benefits from it. 16 | 17 | ## Quick Facts 18 | - Root path: \`${context.repoStructure.rootPath}\` 19 | - Primary languages detected: 20 | ${languageSummary} 21 | 22 | ## File Structure & Code Organization 23 | ${directoryList || '*Add a short description for each relevant directory.*'} 24 | 25 | ## Technology Stack Summary 26 | - Outline primary runtimes, languages, and platforms in use. 27 | - Note build tooling, linting, and formatting infrastructure the team relies on. 28 | 29 | ## Core Framework Stack 30 | - Document core frameworks per layer (backend, frontend, data, messaging). 
31 | - Mention architectural patterns enforced by these frameworks. 32 | 33 | ## UI & Interaction Libraries 34 | - List UI kits, CLI interaction helpers, or design system dependencies. 35 | - Note theming, accessibility, or localization considerations contributors must follow. 36 | 37 | ## Development Tools Overview 38 | - Highlight essential CLIs, scripts, or developer environments. 39 | - Link to [Tooling & Productivity Guide](./tooling.md) for deeper setup instructions. 40 | 41 | ## Getting Started Checklist 42 | 1. Install dependencies with \`npm install\`. 43 | 2. Explore the CLI by running \`npm run dev\`. 44 | 3. Review [Development Workflow](./development-workflow.md) for day-to-day tasks. 45 | 46 | ## Next Steps 47 | Capture product positioning, key stakeholders, and links to external documentation or product specs here. 48 | 49 | 50 | ## AI Update Checklist 51 | 1. Review roadmap items or issues labelled “release” to confirm current goals. 52 | 2. Cross-check Quick Facts against \`package.json\` and environment docs. 53 | 3. Refresh the File Structure & Code Organization section to reflect new or retired modules; keep guidance actionable. 54 | 4. Link critical dashboards, specs, or runbooks used by the team. 55 | 5. Flag any details that require human confirmation (e.g., stakeholder ownership). 56 | 57 | 58 | ## Acceptable Sources 59 | - Recent commits, release notes, or ADRs describing high-level changes. 60 | - Product requirement documents linked from this repository. 61 | - Confirmed statements from maintainers or product leads. 
import * as os from 'os';
import * as path from 'path';
import * as fs from 'fs-extra';

import { PlanGenerator } from './planGenerator';

// Create an isolated temp directory so each test writes to a fresh .context tree.
// NOTE(review): the return type reads bare `Promise` here; the export that
// produced this file appears to have stripped generic parameters (likely
// `Promise<string>`) — confirm against the repository.
function createTempOutput(prefix: string): Promise {
  return fs.mkdtemp(path.join(os.tmpdir(), prefix));
}

// Integration-style tests for PlanGenerator: drive generatePlan() against a
// real temp filesystem and assert on the Markdown it scaffolds.
describe('PlanGenerator', () => {
  let tempDir: string;
  let outputDir: string;
  let generator: PlanGenerator;

  beforeEach(async () => {
    tempDir = await createTempOutput('ai-context-plans-');
    outputDir = path.join(tempDir, '.context');
    generator = new PlanGenerator();
  });

  // Remove the temp tree even when a test fails midway.
  afterEach(async () => {
    if (tempDir) {
      await fs.remove(tempDir);
    }
  });

  it('creates a plan file and updates the index', async () => {
    const result = await generator.generatePlan({
      planName: 'New Initiative',
      outputDir
    });

    // Plan names are slugified into kebab-case file names.
    expect(result.slug).toBe('new-initiative');

    const planPath = path.join(outputDir, 'plans', 'new-initiative.md');
    expect(await fs.pathExists(planPath)).toBe(true);

    const content = await fs.readFile(planPath, 'utf8');
    // Front-matter id plus the key scaffold sections must be present.
    expect(content).toContain('id: plan-new-initiative');
    // NOTE(review): this expectation is an empty string in this export; it
    // presumably asserted an HTML marker such as '<!-- agent-update:start:… -->'
    // that the export stripped — confirm against the repository.
    expect(content).toContain('');
    expect(content).toContain('# New Initiative Plan');
    expect(content).toContain('## Working Phases');
    expect(content).toContain('**Commit Checkpoint**');

    // The plans index (README.md) gains a numbered link to the new plan.
    const indexContent = await fs.readFile(path.join(outputDir, 'plans', 'README.md'), 'utf8');
    expect(indexContent).toContain('1. [New Initiative](./new-initiative.md)');
  });

  it('respects selected agents and docs and supports force overwrite', async () => {
    const options = {
      planName: 'Release Readiness',
      outputDir,
      selectedAgentTypes: ['test-writer'],
      selectedDocKeys: ['testing-strategy']
    };

    const firstResult = await generator.generatePlan(options);
    const planPath = path.join(outputDir, 'plans', 'release-readiness.md');
    const content = await fs.readFile(planPath, 'utf8');

    // Selected agents/docs surface both in YAML front matter and as relative links.
    expect(content).toContain('related_agents:');
    expect(content).toContain(' - "test-writer"');
    expect(content).toContain('[Test Writer](../agents/test-writer.md)');
    expect(content).toContain('[testing-strategy.md](../docs/testing-strategy.md)');

    // Re-running without force must refuse to clobber an existing plan.
    await expect(generator.generatePlan(options)).rejects.toThrow('Plan already exists');

    // With force: true the plan is rewritten and new fields (summary) land in the file.
    const forcedResult = await generator.generatePlan({ ...options, force: true, summary: 'Dry run' });
    expect(forcedResult.slug).toBe(firstResult.slug);

    const updatedContent = await fs.readFile(planPath, 'utf8');
    expect(updatedContent).toContain('Dry run');
  });
});
12 | 13 | ## System Architecture Overview 14 | - Summarize the top-level topology (monolith, modular service, microservices) and deployment model. 15 | - Highlight how requests traverse the system and where control pivots between layers. 16 | 17 | ## Core System Components 18 | ${coreComponentsSection} 19 | 20 | ## Internal System Boundaries 21 | - Document seams between domains, bounded contexts, or service ownership. 22 | - Note data ownership, synchronization strategies, and shared contract enforcement. 23 | 24 | ## System Integration Points 25 | - Map inbound interfaces (APIs, events, webhooks) and the modules that own them. 26 | - Capture orchestration touchpoints where this system calls or coordinates other internal services. 27 | 28 | ## External Service Dependencies 29 | - List SaaS platforms, third-party APIs, or infrastructure services the system relies on. 30 | - Describe authentication methods, rate limits, and failure considerations for each dependency. 31 | 32 | ## Key Decisions & Trade-offs 33 | - Summarize architectural decisions, experiments, or ADR outcomes that shape the current design. 34 | - Reference supporting documents and explain why selected approaches won over alternatives. 35 | 36 | ## Diagrams 37 | - Link architectural diagrams or add mermaid definitions here. 38 | 39 | ## Risks & Constraints 40 | - Document performance constraints, scaling considerations, or external system assumptions. 41 | 42 | ## Top Directories Snapshot 43 | ${directorySnapshot} 44 | 45 | 46 | ## AI Update Checklist 47 | 1. Review ADRs, design docs, or major PRs for architectural changes. 48 | 2. Verify that each documented decision still holds; mark superseded choices clearly. 49 | 3. Capture upstream/downstream impacts (APIs, events, data flows). 50 | 4. Update Risks & Constraints with active incident learnings or TODO debt. 51 | 5. Link any new diagrams or dashboards referenced in recent work. 
import * as os from 'os';
import * as path from 'path';
import * as fs from 'fs-extra';

import { runInit } from './index';

// Build a throwaway fixture repository (src/, tests/, package.json, README)
// for runInit to scan; callers must invoke cleanup() to remove the temp root.
// NOTE(review): generic parameters (e.g. `Promise<void>` on cleanup) appear
// stripped in this export — confirm the exact annotations against the repository.
async function createFixtureRepo(): Promise<{ repoPath: string; cleanup: () => Promise }> {
  const tempRoot = await fs.mkdtemp(path.join(os.tmpdir(), 'ai-context-runinit-'));
  const repoPath = path.join(tempRoot, 'repo');
  await fs.ensureDir(repoPath);
  await fs.ensureDir(path.join(repoPath, 'src'));
  await fs.ensureDir(path.join(repoPath, 'tests'));
  await fs.writeFile(path.join(repoPath, 'src', 'index.ts'), "export const hello = 'world';\n");
  await fs.writeFile(path.join(repoPath, 'package.json'), '{"name":"fixture","version":"0.0.0"}\n');
  await fs.writeFile(path.join(repoPath, 'README.md'), '# Fixture Repo\n');

  return {
    repoPath,
    cleanup: () => fs.remove(tempRoot)
  };
}

// End-to-end checks for runInit: scaffold docs and/or agents into an output
// directory and assert on the files it generates.
describe('runInit integration', () => {
  let cleanup: (() => Promise) | undefined;

  // Each test registers its fixture's cleanup; run and clear it afterwards.
  afterEach(async () => {
    if (cleanup) {
      await cleanup();
      cleanup = undefined;
    }
  });

  it('scaffolds documentation and agents when both are requested', async () => {
    const fixture = await createFixtureRepo();
    cleanup = fixture.cleanup;
    const outputDir = path.join(fixture.repoPath, '..', '.context-all');

    await runInit(fixture.repoPath, 'both', { output: outputDir });

    const docsDir = path.join(outputDir, 'docs');
    const agentsDir = path.join(outputDir, 'agents');

    expect(await fs.pathExists(docsDir)).toBe(true);
    expect(await fs.pathExists(agentsDir)).toBe(true);

    const docsIndex = await fs.readFile(path.join(docsDir, 'README.md'), 'utf8');
    // NOTE(review): empty-string expectation — likely asserted a stripped
    // '<!-- agent-update:… -->' marker; confirm against the repository.
    expect(docsIndex).toContain('');
    expect(docsIndex).toContain('Repository Snapshot');

    const agentIndex = await fs.readFile(path.join(agentsDir, 'README.md'), 'utf8');
    expect(agentIndex).toContain('# Agent Handbook');
    expect(agentIndex).toContain('[Documentation Writer]');

    // Spot-check that at least one concrete playbook was generated.
    const generatedAgents = await fs.readdir(agentsDir);
    expect(generatedAgents).toContain('code-reviewer.md');
  });

  it('supports docs-only scaffolding without creating agent assets', async () => {
    const fixture = await createFixtureRepo();
    cleanup = fixture.cleanup;
    const outputDir = path.join(fixture.repoPath, '..', '.context-docs');

    await runInit(fixture.repoPath, 'docs', { output: outputDir });

    const docsDir = path.join(outputDir, 'docs');
    const agentsDir = path.join(outputDir, 'agents');

    // Docs exist; the agents directory must not be created at all.
    expect(await fs.pathExists(docsDir)).toBe(true);
    expect(await fs.pathExists(path.join(docsDir, 'project-overview.md'))).toBe(true);
    expect(await fs.pathExists(agentsDir)).toBe(false);
  });
});
Your goal is to bring every guide up to date with the latest repository state, fill in placeholder sections, and maintain cross-references between docs and agent instructions.

## Context Gathering Checklist
1. Run `git status -sb` to understand pending changes.
2. Review the latest merged commits or PRs related to documentation, architecture, workflow, or testing.
3. Inspect `package.json`, CI configuration, and any release or roadmap notes stored in the repository.
4. Check `docs/README.md` for the current document map and update AI markers (`agent-update:*`).
5. Identify unresolved placeholders marked as `<!-- agent-fill:* -->`.

## Update Procedure
1. **Select a Guide**
   - Navigate to `docs/<guide-name>.md`.
   - Read the YAML front matter (`ai_update_goal`, `required_inputs`, `success_criteria`) and ensure you collect the listed inputs before editing.

2. **Edit Within Update Wrappers**
   - Update content strictly inside the matching `<!-- agent-update:start:<section-id> -->` block and keep the closing `<!-- agent-update:end -->` tag.
   - Remove or replace any `TODO` text with accurate, current information.
   - When you complete a placeholder slot (`<!-- agent-fill:<slot> -->`), remove the wrapper and provide the finalized description.

3. **Cross-Link Updates**
   - Verify that links between docs remain valid.
   - If you add new guides or sections, update `docs/README.md` and the document map table.

4. **Agent Playbook Alignment**
   - For each change in `docs/`, adjust the related `agents/*.md` playbooks.
   - Ensure the "Documentation Touchpoints" list references the correct `agent-update` markers.
   - Update collaboration checklists and evidence sections to reflect the latest workflows.

5. **Evidence & Traceability**
   - Note key sources (commit hashes, issues, ADRs) in the "Evidence to Capture" or "AI Update Checklist" summary lines.
   - If anything is ambiguous or requires human follow-up, leave a concise note clearly labelled for maintainers.
35 | 36 | ## Acceptance Criteria 37 | - Every guide’s `success_criteria` field is satisfied. 38 | - No unresolved `TODO` or `agent-fill` blocks remain unless they require explicit human input; in such cases, add a comment explaining the dependency. 39 | - Agent playbooks list accurate responsibilities, best practices, and pointer links to the refreshed docs. 40 | - Changes are self-contained, well-formatted Markdown, and reference any new external resources introduced. 41 | 42 | ## Deliverables 43 | - Updated Markdown files committed to the repository. 44 | - A short changelog or PR description summarizing: 45 | - Guides touched 46 | - Key decisions or facts added 47 | - Evidence links and any pending follow-ups 48 | -------------------------------------------------------------------------------- /src/generators/agents/agentGenerator.ts: -------------------------------------------------------------------------------- 1 | import * as path from 'path'; 2 | import { RepoStructure } from '../../types'; 3 | import { GeneratorUtils } from '../shared'; 4 | import { AGENT_TYPES, AgentType } from './agentTypes'; 5 | import { renderAgentPlaybook, renderAgentIndex } from './templates'; 6 | import { DOCUMENT_GUIDES } from '../documentation/guideRegistry'; 7 | 8 | interface AgentContext { 9 | topLevelDirectories: string[]; 10 | } 11 | 12 | interface DocTouchpoint { 13 | title: string; 14 | path: string; 15 | marker: string; 16 | } 17 | 18 | export class AgentGenerator { 19 | private readonly docTouchpoints: DocTouchpoint[] = [ 20 | { 21 | title: 'Documentation Index', 22 | path: '../docs/README.md', 23 | marker: 'agent-update:docs-index' 24 | }, 25 | ...DOCUMENT_GUIDES.map(guide => ({ 26 | title: guide.title, 27 | path: `../docs/${guide.file}`, 28 | marker: guide.marker 29 | })) 30 | ]; 31 | 32 | constructor(..._legacyArgs: unknown[]) {} 33 | 34 | 35 | async generateAgentPrompts( 36 | repoStructure: RepoStructure, 37 | outputDir: string, 38 | selectedAgentTypes?: string[], 39 | 
verbose: boolean = false 40 | ): Promise { 41 | const agentsDir = path.join(outputDir, 'agents'); 42 | await GeneratorUtils.ensureDirectoryAndLog(agentsDir, verbose, 'Generating agent scaffold in'); 43 | 44 | const context = this.buildContext(repoStructure); 45 | const agentTypes = this.resolveAgentSelection(selectedAgentTypes); 46 | 47 | let created = 0; 48 | for (const agentType of agentTypes) { 49 | const content = renderAgentPlaybook(agentType, context.topLevelDirectories, this.docTouchpoints); 50 | const filePath = path.join(agentsDir, `${agentType}.md`); 51 | await GeneratorUtils.writeFileWithLogging(filePath, content, verbose, `Created ${agentType}.md`); 52 | created += 1; 53 | } 54 | 55 | const indexPath = path.join(agentsDir, 'README.md'); 56 | const indexContent = renderAgentIndex(agentTypes); 57 | await GeneratorUtils.writeFileWithLogging(indexPath, indexContent, verbose, 'Created README.md'); 58 | created += 1; 59 | 60 | return created; 61 | } 62 | 63 | private resolveAgentSelection(selected?: string[]): readonly AgentType[] { 64 | if (!selected || selected.length === 0) { 65 | return AGENT_TYPES; 66 | } 67 | 68 | const allowed = new Set(AGENT_TYPES); 69 | const filtered = selected.filter((agent): agent is AgentType => allowed.has(agent as AgentType)); 70 | return (filtered.length > 0 ? 
filtered : AGENT_TYPES) as readonly AgentType[]; 71 | } 72 | 73 | private buildContext(repoStructure: RepoStructure): AgentContext { 74 | const directorySet = new Set(); 75 | 76 | repoStructure.directories.forEach(dir => { 77 | const [firstSegment] = dir.relativePath.split(/[\\/]/).filter(Boolean); 78 | if (firstSegment) { 79 | directorySet.add(firstSegment); 80 | } 81 | }); 82 | 83 | return { 84 | topLevelDirectories: Array.from(directorySet).sort() 85 | }; 86 | } 87 | 88 | } 89 | -------------------------------------------------------------------------------- /src/services/baseLLMClient.ts: -------------------------------------------------------------------------------- 1 | import { UsageStats } from '../types'; 2 | 3 | export abstract class BaseLLMClient { 4 | protected usageStats: UsageStats; 5 | 6 | constructor(model: string) { 7 | this.usageStats = { 8 | totalCalls: 0, 9 | totalPromptTokens: 0, 10 | totalCompletionTokens: 0, 11 | totalTokens: 0, 12 | model 13 | }; 14 | } 15 | 16 | abstract generateText(prompt: string, systemPrompt?: string): Promise; 17 | 18 | async generateDocumentation( 19 | codeContent: string, 20 | filePath: string, 21 | context: string 22 | ): Promise { 23 | const systemPrompt = `You are a technical documentation expert. Generate clear, comprehensive documentation for the provided code file. Include: 24 | 1. Purpose and overview 25 | 2. Key components/functions 26 | 3. Dependencies and relationships 27 | 4. Usage examples where applicable 28 | 5. 
Important notes or gotchas 29 | 30 | Focus on being practical and helpful for developers working with this codebase.`; 31 | 32 | const prompt = `File: ${filePath} 33 | 34 | Context: ${context} 35 | 36 | Code: 37 | \`\`\` 38 | ${codeContent} 39 | \`\`\` 40 | 41 | Generate comprehensive documentation for this file.`; 42 | 43 | return this.generateText(prompt, systemPrompt); 44 | } 45 | 46 | async generateAgentPrompt( 47 | repoStructure: string, 48 | fileContext: string, 49 | agentType: string 50 | ): Promise { 51 | const systemPrompt = `You are an expert at creating AI agent prompts for software development. Create a specialized prompt that would help an AI assistant understand and work effectively with this specific codebase. 52 | 53 | The prompt should include: 54 | 1. Clear understanding of the codebase structure and patterns 55 | 2. Key conventions and best practices used in this project 56 | 3. Important files and their purposes 57 | 4. Common tasks and workflows 58 | 5. Specific guidance for the agent type requested 59 | 60 | Make the prompt practical and actionable.`; 61 | 62 | const prompt = `Codebase Structure: 63 | ${repoStructure} 64 | 65 | File Context: 66 | ${fileContext} 67 | 68 | Agent Type: ${agentType} 69 | 70 | Generate a comprehensive agent prompt that would help an AI assistant work effectively with this codebase for ${agentType} tasks.`; 71 | 72 | return this.generateText(prompt, systemPrompt); 73 | } 74 | 75 | getUsageStats(): UsageStats { 76 | return { ...this.usageStats }; 77 | } 78 | 79 | resetUsageStats(): void { 80 | const model = this.usageStats.model; 81 | this.usageStats = { 82 | totalCalls: 0, 83 | totalPromptTokens: 0, 84 | totalCompletionTokens: 0, 85 | totalTokens: 0, 86 | model 87 | }; 88 | } 89 | 90 | protected trackUsage(usage: any): void { 91 | if (usage) { 92 | this.usageStats.totalCalls++; 93 | this.usageStats.totalPromptTokens += usage.prompt_tokens || usage.input_tokens || 0; 94 | this.usageStats.totalCompletionTokens += 
usage.completion_tokens || usage.output_tokens || 0; 95 | this.usageStats.totalTokens += usage.total_tokens || (usage.prompt_tokens + usage.completion_tokens) || 0; 96 | } 97 | } 98 | } 99 | -------------------------------------------------------------------------------- /src/utils/promptLoader.test.ts: -------------------------------------------------------------------------------- 1 | import * as path from 'path'; 2 | import * as os from 'os'; 3 | import * as fs from 'fs-extra'; 4 | 5 | import { 6 | resolvePlanPrompt, 7 | resolveScaffoldPrompt, 8 | PromptSource 9 | } from './promptLoader'; 10 | import { 11 | UPDATE_PLAN_PROMPT_FALLBACK, 12 | UPDATE_SCAFFOLD_PROMPT_FALLBACK 13 | } from '../prompts/defaults'; 14 | 15 | describe('promptLoader', () => { 16 | const messageFactory = (resolvedPath: string) => `Missing prompt at ${resolvedPath}`; 17 | 18 | afterEach(() => { 19 | jest.restoreAllMocks(); 20 | }); 21 | 22 | describe('resolveScaffoldPrompt', () => { 23 | it('returns custom prompt content when a custom path is provided', async () => { 24 | const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), 'prompt-loader-')); 25 | const customPath = path.join(tempDir, 'custom-prompt.md'); 26 | await fs.writeFile(customPath, 'custom override prompt'); 27 | 28 | const result = await resolveScaffoldPrompt(customPath, messageFactory); 29 | 30 | expect(result).toEqual({ 31 | content: 'custom override prompt', 32 | source: 'custom' as PromptSource, 33 | path: customPath 34 | }); 35 | 36 | await fs.remove(tempDir); 37 | }); 38 | 39 | it('loads the packaged prompt when the repository prompt file is available', async () => { 40 | const result = await resolveScaffoldPrompt(undefined, messageFactory); 41 | 42 | const packagedPath = path.resolve(__dirname, '../../prompts/update_scaffold_prompt.md'); 43 | const packagedContent = await fs.readFile(packagedPath, 'utf-8'); 44 | 45 | expect(result.source).toBe('package'); 46 | expect(result.path).toBe(packagedPath); 47 | 
expect(result.content).toBe(packagedContent); 48 | }); 49 | 50 | it('falls back to the built-in prompt when no file exists', async () => { 51 | const fakeFs = { 52 | pathExists: jest.fn(async () => false), 53 | readFile: jest.fn() 54 | } as unknown as typeof fs; 55 | 56 | const result = await resolveScaffoldPrompt(undefined, messageFactory, fakeFs); 57 | 58 | expect(fakeFs.pathExists).toHaveBeenCalled(); 59 | expect(fakeFs.readFile).not.toHaveBeenCalled(); 60 | expect(result).toEqual({ 61 | content: UPDATE_SCAFFOLD_PROMPT_FALLBACK, 62 | source: 'builtin' 63 | }); 64 | }); 65 | 66 | it('throws a helpful error when a custom prompt is missing', async () => { 67 | const nonexistentPath = path.join(os.tmpdir(), 'does-not-exist.md'); 68 | await expect(resolveScaffoldPrompt(nonexistentPath, messageFactory)).rejects.toThrow( 69 | `Missing prompt at ${path.resolve(nonexistentPath)}` 70 | ); 71 | }); 72 | }); 73 | 74 | describe('resolvePlanPrompt', () => { 75 | it('falls back to the built-in plan prompt when nothing is found', async () => { 76 | const fakeFs = { 77 | pathExists: jest.fn(async () => false), 78 | readFile: jest.fn() 79 | } as unknown as typeof fs; 80 | 81 | const result = await resolvePlanPrompt(undefined, messageFactory, fakeFs); 82 | 83 | expect(result).toEqual({ 84 | content: UPDATE_PLAN_PROMPT_FALLBACK, 85 | source: 'builtin' 86 | }); 87 | }); 88 | }); 89 | }); 90 | -------------------------------------------------------------------------------- /src/cli.test.ts: -------------------------------------------------------------------------------- 1 | import { execSync } from 'child_process'; 2 | import * as path from 'path'; 3 | 4 | describe('CLI Commands', () => { 5 | const cliPath = path.join(__dirname, '../dist/index.js'); 6 | 7 | beforeAll(() => { 8 | // Build the project before running tests 9 | execSync('npm run build', { stdio: 'pipe' }); 10 | }); 11 | 12 | describe('Main CLI', () => { 13 | it('should display help when --help flag is used', () => { 14 | 
const output = execSync(`node ${cliPath} --help`, { encoding: 'utf8' }); 15 | expect(output).toContain('Scaffold documentation and agent playbooks'); 16 | expect(output).toContain('Commands:'); 17 | expect(output).toContain('init'); 18 | expect(output).toContain('fill'); 19 | expect(output).toContain('plan'); 20 | }); 21 | 22 | it('should display version when --version flag is used', () => { 23 | const output = execSync(`node ${cliPath} --version`, { encoding: 'utf8' }); 24 | expect(output).toMatch(/\d+\.\d+\.\d+/); 25 | }); 26 | }); 27 | 28 | describe('init command', () => { 29 | it('should display help for init command', () => { 30 | const output = execSync(`node ${cliPath} init --help`, { encoding: 'utf8' }); 31 | expect(output).toContain('Generate docs and agent scaffolding'); 32 | expect(output).toContain('"docs", "agents", or "both"'); 33 | expect(output).toContain('[type]'); 34 | expect(output).toContain('(default)'); 35 | [ 36 | '-o, --output ', 37 | '--exclude ', 38 | '--include ', 39 | '-v, --verbose' 40 | ].forEach(option => expect(output).toContain(option)); 41 | }); 42 | }); 43 | 44 | describe('fill command', () => { 45 | it('should display help for fill command', () => { 46 | const output = execSync(`node ${cliPath} fill --help`, { encoding: 'utf8' }); 47 | expect(output).toContain('Use an LLM to fill generated docs and agent playbooks'); 48 | [ 49 | '-o, --output ', 50 | '-k, --api-key ', 51 | '-m, --model ', 52 | '-p, --provider ', 53 | '--base-url ', 54 | '--prompt ', 55 | '--limit ', 56 | '--exclude ', 57 | '--include ', 58 | '-v, --verbose' 59 | ].forEach(option => expect(output).toContain(option)); 60 | }); 61 | }); 62 | 63 | describe('plan command', () => { 64 | it('should display help for plan command', () => { 65 | const output = execSync(`node ${cliPath} plan --help`, { encoding: 'utf8' }); 66 | expect(output).toContain('Create a development plan that links documentation and agent playbooks'); 67 | [ 68 | '-o, --output ', 69 | '--title ', 70 
| '--summary <text>', 71 | '-f, --force', 72 | '--fill', 73 | '-r, --repo <path>', 74 | '-k, --api-key <key>', 75 | '-m, --model <model>', 76 | '-p, --provider <provider>', 77 | '--base-url <url>', 78 | '--prompt <file>', 79 | '--dry-run', 80 | '--include <patterns...>', 81 | '--exclude <patterns...>', 82 | '-v, --verbose' 83 | ].forEach(option => expect(output).toContain(option)); 84 | }); 85 | }); 86 | }); 87 | -------------------------------------------------------------------------------- /src/generators/shared/generatorUtils.ts: -------------------------------------------------------------------------------- 1 | import * as fs from 'fs-extra'; 2 | import * as path from 'path'; 3 | import chalk from 'chalk'; 4 | import { RepoStructure } from '../../types'; 5 | 6 | export class GeneratorUtils { 7 | static formatBytes(bytes: number): string { 8 | if (bytes === 0) return '0 Bytes'; 9 | const k = 1024; 10 | const sizes = ['Bytes', 'KB', 'MB', 'GB']; 11 | const i = Math.floor(Math.log(bytes) / Math.log(k)); 12 | return parseFloat((bytes / Math.pow(k, i)).toFixed(2)) + ' ' + sizes[i]; 13 | } 14 | 15 | static slugify(text: string): string { 16 | return text 17 | .toLowerCase() 18 | .replace(/\s+/g, '-') 19 | .replace(/[^a-z0-9-]/g, ''); 20 | } 21 | 22 | static formatModuleName(name: string): string { 23 | return name 24 | .split(/[-_]/) 25 | .map(word => word.charAt(0).toUpperCase() + word.slice(1)) 26 | .join(' '); 27 | } 28 | 29 | static formatTitle(text: string): string { 30 | return text.split('-').map(word => 31 | word.charAt(0).toUpperCase() + word.slice(1) 32 | ).join(' '); 33 | } 34 | 35 | static async ensureDirectoryAndLog(dir: string, verbose: boolean, description: string): Promise<void> { 36 | await fs.ensureDir(dir); 37 | if (verbose) { 38 | console.log(chalk.blue(`📁 ${description}: ${dir}`)); 39 | } 40 | } 41 | 42 | static async writeFileWithLogging( 43 | filePath: string, 44 | content: string, 45 | verbose: boolean, 46 | successMessage?: string 47 | ): 
Promise<void> { 48 | const fileName = path.basename(filePath); 49 | 50 | if (verbose) { 51 | console.log(chalk.blue(`📄 Creating ${fileName}...`)); 52 | } 53 | 54 | await fs.writeFile(filePath, content); 55 | 56 | if (verbose) { 57 | console.log(chalk.green(`✅ ${successMessage || `Created ${fileName}`}`)); 58 | } 59 | } 60 | 61 | static logError(message: string, error: any, verbose: boolean): void { 62 | if (verbose) { 63 | console.log(chalk.red(`❌ ${message}: ${error}`)); 64 | } 65 | } 66 | 67 | static logProgress(message: string, verbose: boolean): void { 68 | if (verbose) { 69 | console.log(chalk.yellow(message)); 70 | } 71 | } 72 | 73 | static getFileTypeDistribution(repoStructure: RepoStructure): Map<string, number> { 74 | const extensions = new Map<string, number>(); 75 | repoStructure.files.forEach(file => { 76 | const ext = file.extension || 'no-extension'; 77 | extensions.set(ext, (extensions.get(ext) || 0) + 1); 78 | }); 79 | return extensions; 80 | } 81 | 82 | static getTopFileExtensions(repoStructure: RepoStructure, limit: number = 5): Array<[string, number]> { 83 | const extensions = this.getFileTypeDistribution(repoStructure); 84 | return Array.from(extensions.entries()) 85 | .sort((a, b) => b[1] - a[1]) 86 | .slice(0, limit); 87 | } 88 | 89 | static createTimestamp(): string { 90 | return new Date().toISOString(); 91 | } 92 | 93 | static createGeneratedByFooter(additionalInfo?: string): string { 94 | return `--- 95 | *Generated by AI Coders Context* 96 | ${additionalInfo ? `*${additionalInfo}*` : ''} 97 | *Generated on: ${this.createTimestamp()}* 98 | `; 99 | } 100 | } 101 | -------------------------------------------------------------------------------- /AGENTS.md: -------------------------------------------------------------------------------- 1 | # Repository Guidelines 2 | 3 | ## Project Structure & Module Organization 4 | `src/index.ts` owns the Commander CLI and now scaffolds documentation and agent playbooks without hitting any LLM endpoints. 
Generators live under `src/generators`, utilities (CLI helpers, file mapping, git support) stay in `src/utils`, and type contracts in `src/types.ts`. Built artefacts land in `dist/` after `npm run build`, while generated assets are saved to `./.context`. Treat `docs/README.md` as the navigation hub for documentation deliverables and `agents/README.md` as the index for agent playbooks. 5 | 6 | ## Build, Test, and Development Commands 7 | Install dependencies with `npm install`. Run `npm run dev` for an interactive TypeScript session via `tsx`, and `npm run build` to emit the executable CommonJS bundle in `dist/`. Execute the suite with `npm run test`; append `-- --watch` for iterative loops. Publish helpers (`npm run release`, `release:minor`, `release:major`) still bump the package version and push to npm—use them only from a clean main branch. 8 | 9 | ## Coding Style & Naming Conventions 10 | The project relies on strict TypeScript; keep new files inside `src` and leave compiler checks enabled. Follow the prevailing two-space indentation, single quotes, and trailing commas for multi-line literals. Prefer named exports for modules, using PascalCase for classes, camelCase for variables and functions, and SCREAMING_SNAKE_CASE for constants. When you add scaffolding examples, cross-link them in `docs/README.md` and `agents/README.md` so contributors can discover the updates quickly. 11 | 12 | ## Testing Guidelines 13 | Place Jest specs alongside the files they cover with the `*.test.ts` suffix. Validate CLI behaviours against the compiled binary (`dist/index.js`) to mirror how end-users invoke the tool. Run `npm run build && npm run test` before sending a PR, and include `npm run test -- --coverage` when you touch critical flows or generators. 14 | 15 | ## Documentation Markers & AI Tags 16 | Scaffolded guides now include: 17 | - YAML front matter describing the AI task (`id`, `ai_update_goal`, `required_inputs`, `success_criteria`). 
18 | - Update wrappers such as `<!-- agent-update:start:project-overview -->` ... `<!-- agent-update:end -->` that bound sections an agent may rewrite. 19 | - Placeholders like `<!-- agent-fill:directory-src -->` signalling content that still needs human-provided context. 20 | - Guard rails such as `<!-- agent-readonly:guidance -->` marking sections that should remain instructional unless a maintainer says otherwise. 21 | 22 | When editing docs or adding new ones, preserve existing markers and introduce new ones where agents should focus future updates. Reference these markers from agent playbooks when you create specialised workflows. 23 | 24 | ### LLM-assisted Updates 25 | - Use `ai-context fill <repo>` to apply the shared prompt (`prompts/update_scaffold_prompt.md`) across the scaffold. 26 | - Use a small `--limit` while validating new instructions. 27 | - Always review the generated Markdown before committing; adjust the prompt if the model misinterprets success criteria. 28 | 29 | ## Commit & Pull Request Guidelines 30 | Stick to Conventional Commits (`feat(scaffolding): ...`, `fix(cli): ...`, `chore:`). Keep messages imperative and scope names aligned with folder structure. In pull requests, describe the user impact, link related issues, and attach sample output from the new scaffolds (`docs/README.md`, `agents/README.md`) whenever behaviour changes. Confirm CI status and call out any manual follow-up for reviewers. 31 | 32 | ## Environment & Release Tips 33 | No API keys are required for scaffolding; remove stale tokens from local `.env` files. Ensure `dist/` reflects the latest build before publishing and double-check that `package.json`'s version matches the intended release tag. If you modify the scaffold templates, refresh `docs/README.md` and `agents/README.md` in your commit so downstream teams receive the latest references. 
import * as os from 'os';
import * as path from 'path';
import * as fs from 'fs-extra';

import { AgentGenerator } from './agentGenerator';
import { AGENT_TYPES } from './agentTypes';
import type { RepoStructure } from '../../types';

// Build a minimal RepoStructure fixture (one source file plus src/docs/agents
// directories) shaped like the structures the generators consume.
function createRepoStructure(rootPath: string): RepoStructure {
  return {
    rootPath,
    files: [
      {
        path: path.join(rootPath, 'src/index.ts'),
        relativePath: 'src/index.ts',
        extension: '.ts',
        size: 128,
        type: 'file'
      }
    ],
    directories: [
      {
        path: path.join(rootPath, 'src'),
        relativePath: 'src',
        extension: '',
        size: 0,
        type: 'directory'
      },
      {
        path: path.join(rootPath, 'docs'),
        relativePath: 'docs',
        extension: '',
        size: 0,
        type: 'directory'
      },
      {
        path: path.join(rootPath, 'agents'),
        relativePath: 'agents',
        extension: '',
        size: 0,
        type: 'directory'
      }
    ],
    totalFiles: 1,
    totalSize: 128,
    topLevelDirectoryStats: [
      {
        name: 'src',
        fileCount: 1,
        totalSize: 128
      },
      {
        name: 'docs',
        fileCount: 0,
        totalSize: 0
      },
      {
        name: 'agents',
        fileCount: 0,
        totalSize: 0
      }
    ]
  };
}

// Filesystem-level tests for AgentGenerator.generateAgentPrompts: playbook
// files plus a README index are written under <output>/agents.
describe('AgentGenerator', () => {
  let tempDir: string;
  let outputDir: string;
  const generator = new AgentGenerator();

  beforeEach(async () => {
    tempDir = await fs.mkdtemp(path.join(os.tmpdir(), 'ai-context-agents-'));
    outputDir = path.join(tempDir, '.context');
  });

  // Remove the temp tree even when a test fails midway.
  afterEach(async () => {
    if (tempDir) {
      await fs.remove(tempDir);
    }
  });

  it('generates selected agent playbooks and index', async () => {
    const repoStructure = createRepoStructure(path.join(tempDir, 'repo'));
    const selectedAgents = ['code-reviewer', 'test-writer'];

    const created = await generator.generateAgentPrompts(
      repoStructure,
      outputDir,
      selectedAgents
    );

    // One file per selected agent plus the README index.
    expect(created).toBe(selectedAgents.length + 1);

    const agentsDir = path.join(outputDir, 'agents');
    const files = (await fs.readdir(agentsDir)).sort();
    expect(files).toEqual(['README.md', 'code-reviewer.md', 'test-writer.md']);

    // Playbooks carry the title, touchpoints section, and agent-update markers.
    const playbookContent = await fs.readFile(path.join(agentsDir, 'code-reviewer.md'), 'utf8');
    expect(playbookContent).toContain('# Code Reviewer Agent Playbook');
    expect(playbookContent).toContain('Documentation Touchpoints');
    expect(playbookContent).toContain('<!-- agent-update:start:agent-code-reviewer -->');
    expect(playbookContent).toContain('agent-update:project-overview');

    const indexContent = await fs.readFile(path.join(agentsDir, 'README.md'), 'utf8');
    expect(indexContent).toContain('[Code Reviewer](./code-reviewer.md)');
    expect(indexContent).toContain('[Test Writer](./test-writer.md)');
  });

  it('falls back to all agent types when selection is invalid', async () => {
    const repoStructure = createRepoStructure(path.join(tempDir, 'repo'));

    // Unknown agent names are filtered out; an empty result falls back to AGENT_TYPES.
    const created = await generator.generateAgentPrompts(
      repoStructure,
      outputDir,
      ['not-a-real-agent']
    );

    expect(created).toBe(AGENT_TYPES.length + 1);

    const agentsDir = path.join(outputDir, 'agents');
    const files = await fs.readdir(agentsDir);
    AGENT_TYPES.forEach(agent => {
      expect(files).toContain(`${agent}.md`);
    });
  });
});
-------------------------------------------------------------------------------- /src/generators/agents/templates/playbookTemplate.ts: -------------------------------------------------------------------------------- 1 | import { AGENT_RESPONSIBILITIES, AGENT_BEST_PRACTICES } from '../agentConfig'; 2 | import { AgentType } from '../agentTypes'; 3 | import { formatDirectoryList } from '../../shared/directoryTemplateHelpers'; 4 | import { DocTouchpoint } from './types'; 5 | 6 | export function renderAgentPlaybook( 7 | agentType: AgentType, 8 | topLevelDirectories: string[], 9 | touchpoints: DocTouchpoint[] 10 | ): string { 11 | const title = formatTitle(agentType); 12 | const responsibilities = AGENT_RESPONSIBILITIES[agentType] || ['Clarify this agent\'s responsibilities.']; 13 | const bestPractices = AGENT_BEST_PRACTICES[agentType] || ['Document preferred workflows.']; 14 | const directoryList = formatDirectoryList(topLevelDirectories); 15 | const markerId = `agent-${agentType}`; 16 | 17 | const touchpointList = touchpoints 18 | .map(tp => `- [${tp.title}](${tp.path}) — ${tp.marker}`) 19 | .join('\n'); 20 | 21 | return `<!-- agent-update:start:${markerId} --> 22 | # ${title} Agent Playbook 23 | 24 | ## Mission 25 | Describe how the ${title.toLowerCase()} agent supports the team and when to engage it. 26 | 27 | ## Responsibilities 28 | ${formatList(responsibilities)} 29 | 30 | ## Best Practices 31 | ${formatList(bestPractices)} 32 | 33 | ## Key Project Resources 34 | - Documentation index: [docs/README.md](../docs/README.md) 35 | - Agent handbook: [agents/README.md](./README.md) 36 | - Agent knowledge base: [AGENTS.md](../../AGENTS.md) 37 | - Contributor guide: [CONTRIBUTING.md](../../CONTRIBUTING.md) 38 | 39 | ## Repository Starting Points 40 | ${directoryList || '- Add directory highlights relevant to this agent.'} 41 | 42 | ## Documentation Touchpoints 43 | ${touchpointList} 44 | 45 | <!-- agent-readonly:guidance --> 46 | ## Collaboration Checklist 47 | 1. 
Confirm assumptions with issue reporters or maintainers. 48 | 2. Review open pull requests affecting this area. 49 | 3. Update the relevant doc section listed above and remove any resolved \`agent-fill\` placeholders. 50 | 4. Capture learnings back in [docs/README.md](../docs/README.md) or the appropriate task marker. 51 | 52 | ## Success Metrics 53 | Track effectiveness of this agent's contributions: 54 | - **Code Quality:** Reduced bug count, improved test coverage, decreased technical debt 55 | - **Velocity:** Time to complete typical tasks, deployment frequency 56 | - **Documentation:** Coverage of features, accuracy of guides, usage by team 57 | - **Collaboration:** PR review turnaround time, feedback quality, knowledge sharing 58 | 59 | **Target Metrics:** 60 | - TODO: Define measurable goals specific to this agent (e.g., "Reduce bug resolution time by 30%") 61 | - TODO: Track trends over time to identify improvement areas 62 | 63 | ## Troubleshooting Common Issues 64 | Document frequent problems this agent encounters and their solutions: 65 | 66 | ### Issue: [Common Problem] 67 | **Symptoms:** Describe what indicates this problem 68 | **Root Cause:** Why this happens 69 | **Resolution:** Step-by-step fix 70 | **Prevention:** How to avoid in the future 71 | 72 | **Example:** 73 | ### Issue: Build Failures Due to Outdated Dependencies 74 | **Symptoms:** Tests fail with module resolution errors 75 | **Root Cause:** Package versions incompatible with codebase 76 | **Resolution:** 77 | 1. Review package.json for version ranges 78 | 2. Run \`npm update\` to get compatible versions 79 | 3. Test locally before committing 80 | **Prevention:** Keep dependencies updated regularly, use lockfiles 81 | 82 | ## Hand-off Notes 83 | Summarize outcomes, remaining risks, and suggested follow-up actions after the agent completes its work. 84 | 85 | ## Evidence to Capture 86 | - Reference commits, issues, or ADRs used to justify updates. 
87 | - Command output or logs that informed recommendations. 88 | - Follow-up items for maintainers or future agent runs. 89 | - Performance metrics and benchmarks where applicable. 90 | <!-- agent-update:end --> 91 | `; 92 | } 93 | 94 | function formatTitle(agentType: string): string { 95 | return agentType 96 | .split('-') 97 | .map(segment => segment.charAt(0).toUpperCase() + segment.slice(1)) 98 | .join(' '); 99 | } 100 | 101 | function formatList(items: string[]): string { 102 | if (!items.length) { 103 | return '- _No entries defined yet._'; 104 | } 105 | return items.map(item => `- ${item}`).join('\n'); 106 | } 107 | -------------------------------------------------------------------------------- /src/generators/plans/planGenerator.ts: -------------------------------------------------------------------------------- 1 | import * as path from 'path'; 2 | import * as fs from 'fs-extra'; 3 | 4 | import { GeneratorUtils } from '../shared'; 5 | import { AgentType, AGENT_TYPES } from '../agents/agentTypes'; 6 | import { AGENT_RESPONSIBILITIES } from '../agents/agentConfig'; 7 | import { getGuidesByKeys } from '../documentation/guideRegistry'; 8 | import { renderPlanTemplate } from './templates/planTemplate'; 9 | import { renderPlanIndex } from './templates/indexTemplate'; 10 | import { PlanAgentSummary, PlanIndexEntry } from './templates/types'; 11 | 12 | interface PlanGeneratorOptions { 13 | planName: string; 14 | outputDir: string; 15 | title?: string; 16 | summary?: string; 17 | selectedAgentTypes?: string[] | null; 18 | selectedDocKeys?: string[] | null; 19 | force?: boolean; 20 | verbose?: boolean; 21 | } 22 | 23 | interface PlanGenerationResult { 24 | planPath: string; 25 | relativePath: string; 26 | slug: string; 27 | } 28 | 29 | export class PlanGenerator { 30 | async generatePlan(options: PlanGeneratorOptions): Promise<PlanGenerationResult> { 31 | const { 32 | planName, 33 | outputDir, 34 | title, 35 | summary, 36 | selectedAgentTypes, 37 | 
selectedDocKeys, 38 | force = false, 39 | verbose = false 40 | } = options; 41 | 42 | const slug = GeneratorUtils.slugify(planName); 43 | if (!slug) { 44 | throw new Error('Plan name must contain at least one alphanumeric character.'); 45 | } 46 | 47 | const planTitle = title?.trim() || GeneratorUtils.formatTitle(slug); 48 | const resolvedOutput = path.resolve(outputDir); 49 | const plansDir = path.join(resolvedOutput, 'plans'); 50 | 51 | await fs.ensureDir(resolvedOutput); 52 | await GeneratorUtils.ensureDirectoryAndLog(plansDir, verbose, 'Ensuring plans directory'); 53 | 54 | const planFileName = `${slug}.md`; 55 | const planPath = path.join(plansDir, planFileName); 56 | 57 | if (!force && await fs.pathExists(planPath)) { 58 | throw new Error(`Plan already exists at ${planPath}. Use --force to overwrite.`); 59 | } 60 | 61 | const agentSummaries = this.resolveAgents(selectedAgentTypes); 62 | const docGuides = selectedDocKeys === null 63 | ? [] 64 | : getGuidesByKeys(selectedDocKeys || undefined); 65 | 66 | const content = renderPlanTemplate({ 67 | title: planTitle, 68 | slug, 69 | summary, 70 | agents: agentSummaries, 71 | docs: docGuides 72 | }); 73 | 74 | await GeneratorUtils.writeFileWithLogging( 75 | planPath, 76 | content, 77 | verbose, 78 | `Created ${planFileName}` 79 | ); 80 | 81 | await this.updatePlanIndex(plansDir, verbose); 82 | 83 | return { 84 | planPath, 85 | relativePath: path.relative(resolvedOutput, planPath), 86 | slug 87 | }; 88 | } 89 | 90 | private resolveAgents(selected?: string[] | null): PlanAgentSummary[] { 91 | const allowed = new Set<AgentType>(Array.from(AGENT_TYPES)); 92 | 93 | if (selected === null) { 94 | return []; 95 | } 96 | 97 | const chosen: AgentType[] = selected && selected.length > 0 98 | ? 
Array.from(new Set(selected.map(value => value.toLowerCase().trim()))) 99 | .filter(value => allowed.has(value as AgentType)) as AgentType[] 100 | : Array.from(allowed); 101 | 102 | return chosen.map(type => ({ 103 | type, 104 | title: GeneratorUtils.formatTitle(type), 105 | responsibility: AGENT_RESPONSIBILITIES[type]?.[0] || 'Document this agent\'s primary responsibility.' 106 | })); 107 | } 108 | 109 | private async updatePlanIndex(plansDir: string, verbose: boolean): Promise<void> { 110 | const files = await fs.readdir(plansDir); 111 | const entries: PlanIndexEntry[] = files 112 | .filter(file => file.toLowerCase().endsWith('.md') && file.toLowerCase() !== 'readme.md') 113 | .map(file => file.replace(/\.md$/i, '')) 114 | .map(slug => ({ slug, title: GeneratorUtils.formatTitle(slug) })) 115 | .sort((a, b) => a.title.localeCompare(b.title)); 116 | 117 | const indexContent = renderPlanIndex(entries); 118 | const indexPath = path.join(plansDir, 'README.md'); 119 | 120 | await GeneratorUtils.writeFileWithLogging( 121 | indexPath, 122 | indexContent, 123 | verbose, 124 | 'Updated plans index' 125 | ); 126 | } 127 | } 128 | -------------------------------------------------------------------------------- /src/utils/fileMapper.ts: -------------------------------------------------------------------------------- 1 | import * as fs from 'fs-extra'; 2 | import * as path from 'path'; 3 | import { glob } from 'glob'; 4 | import { FileInfo, RepoStructure, TopLevelDirectoryStats } from '../types'; 5 | 6 | export class FileMapper { 7 | private excludePatterns: string[] = [ 8 | 'node_modules/**', 9 | '.git/**', 10 | 'dist/**', 11 | 'build/**', 12 | '*.log', 13 | '.env*', 14 | '*.tmp', 15 | '**/.DS_Store' 16 | ]; 17 | 18 | constructor(customExcludes: string[] = []) { 19 | this.excludePatterns = [...this.excludePatterns, ...customExcludes]; 20 | } 21 | 22 | async mapRepository(repoPath: string, includePatterns?: string[]): Promise<RepoStructure> { 23 | const absolutePath = 
path.resolve(repoPath); 24 | 25 | if (!await fs.pathExists(absolutePath)) { 26 | throw new Error(`Repository path does not exist: ${absolutePath}`); 27 | } 28 | 29 | const patterns = includePatterns || ['**/*']; 30 | const allFiles: string[] = []; 31 | 32 | for (const pattern of patterns) { 33 | const files = await glob(pattern, { 34 | cwd: absolutePath, 35 | ignore: this.excludePatterns, 36 | dot: false, 37 | absolute: false 38 | }); 39 | allFiles.push(...files); 40 | } 41 | 42 | const uniqueFiles = [...new Set(allFiles)]; 43 | const fileInfos: FileInfo[] = []; 44 | const directories: FileInfo[] = []; 45 | let totalSize = 0; 46 | const topLevelStats = new Map<string, { fileCount: number; totalSize: number }>(); 47 | const concurrency = 32; 48 | 49 | for (let index = 0; index < uniqueFiles.length; index += concurrency) { 50 | const slice = uniqueFiles.slice(index, index + concurrency); 51 | await Promise.all( 52 | slice.map(async relativePath => { 53 | const fullPath = path.join(absolutePath, relativePath); 54 | const stats = await fs.stat(fullPath); 55 | const info: FileInfo = { 56 | path: fullPath, 57 | relativePath, 58 | extension: path.extname(relativePath), 59 | size: stats.size, 60 | type: stats.isDirectory() ? 'directory' : 'file' 61 | }; 62 | 63 | const topLevelSegment = this.extractTopLevelSegment(relativePath); 64 | if (topLevelSegment) { 65 | const current = topLevelStats.get(topLevelSegment) ?? 
{ fileCount: 0, totalSize: 0 }; 66 | if (!stats.isDirectory()) { 67 | current.fileCount += 1; 68 | current.totalSize += stats.size; 69 | } 70 | topLevelStats.set(topLevelSegment, current); 71 | } 72 | 73 | if (stats.isDirectory()) { 74 | directories.push(info); 75 | } else { 76 | fileInfos.push(info); 77 | totalSize += stats.size; 78 | } 79 | 80 | }) 81 | ); 82 | } 83 | 84 | const topLevelDirectoryStats: TopLevelDirectoryStats[] = Array.from(topLevelStats.entries()) 85 | .map(([name, stats]) => ({ name, fileCount: stats.fileCount, totalSize: stats.totalSize })) 86 | .sort((a, b) => a.name.localeCompare(b.name)); 87 | 88 | return { 89 | rootPath: absolutePath, 90 | files: fileInfos, 91 | directories, 92 | totalFiles: fileInfos.length, 93 | totalSize, 94 | topLevelDirectoryStats 95 | }; 96 | } 97 | 98 | async readFileContent(filePath: string): Promise<string> { 99 | try { 100 | return await fs.readFile(filePath, 'utf-8'); 101 | } catch (error) { 102 | return `Error reading file: ${error instanceof Error ? error.message : String(error)}`; 103 | } 104 | } 105 | 106 | getFilesByExtension(files: FileInfo[], extension: string): FileInfo[] { 107 | return files.filter(file => file.extension === extension); 108 | } 109 | 110 | isTextFile(filePath: string): boolean { 111 | const textExtensions = [ 112 | '.js', '.ts', '.jsx', '.tsx', '.py', '.java', '.cpp', '.c', '.h', 113 | '.css', '.scss', '.sass', '.html', '.xml', '.json', '.yaml', '.yml', 114 | '.md', '.txt', '.sql', '.sh', '.bat', '.ps1', '.php', '.rb', '.go', 115 | '.rs', '.swift', '.kt', '.scala', '.r', '.m', '.pl', '.lua', '.vim', 116 | '.dockerfile', '.gitignore', '.env' 117 | ]; 118 | 119 | const ext = path.extname(filePath).toLowerCase(); 120 | return textExtensions.includes(ext) || !ext; 121 | } 122 | 123 | private extractTopLevelSegment(relativePath: string): string | null { 124 | const parts = relativePath.split(/[/\\]/).filter(Boolean); 125 | return parts.length > 0 ? 
parts[0] : null; 126 | } 127 | } 128 | -------------------------------------------------------------------------------- /src/generators/documentation/documentationGenerator.test.ts: -------------------------------------------------------------------------------- 1 | import * as os from 'os'; 2 | import * as path from 'path'; 3 | import * as fs from 'fs-extra'; 4 | 5 | import { DocumentationGenerator } from './documentationGenerator'; 6 | import { DOCUMENT_GUIDES } from './guideRegistry'; 7 | import type { RepoStructure } from '../../types'; 8 | 9 | function createRepoStructure(rootPath: string): RepoStructure { 10 | return { 11 | rootPath, 12 | files: [ 13 | { 14 | path: path.join(rootPath, 'src/index.ts'), 15 | relativePath: 'src/index.ts', 16 | extension: '.ts', 17 | size: 128, 18 | type: 'file' 19 | }, 20 | { 21 | path: path.join(rootPath, 'package.json'), 22 | relativePath: 'package.json', 23 | extension: '.json', 24 | size: 256, 25 | type: 'file' 26 | } 27 | ], 28 | directories: [ 29 | { 30 | path: path.join(rootPath, 'src'), 31 | relativePath: 'src', 32 | extension: '', 33 | size: 0, 34 | type: 'directory' 35 | }, 36 | { 37 | path: path.join(rootPath, 'tests'), 38 | relativePath: 'tests', 39 | extension: '', 40 | size: 0, 41 | type: 'directory' 42 | } 43 | ], 44 | totalFiles: 2, 45 | totalSize: 384, 46 | topLevelDirectoryStats: [ 47 | { 48 | name: 'src', 49 | fileCount: 1, 50 | totalSize: 128 51 | }, 52 | { 53 | name: 'tests', 54 | fileCount: 0, 55 | totalSize: 0 56 | } 57 | ] 58 | }; 59 | } 60 | 61 | describe('DocumentationGenerator', () => { 62 | let tempDir: string; 63 | let outputDir: string; 64 | const generator = new DocumentationGenerator(); 65 | 66 | beforeEach(async () => { 67 | tempDir = await fs.mkdtemp(path.join(os.tmpdir(), 'ai-context-docs-')); 68 | outputDir = path.join(tempDir, '.context'); 69 | await fs.ensureDir(path.join(tempDir, 'repo')); 70 | }); 71 | 72 | afterEach(async () => { 73 | if (tempDir) { 74 | await fs.remove(tempDir); 75 | } 
76 | }); 77 | 78 | it('generates all guides with agent-update markers by default', async () => { 79 | const repoStructure = createRepoStructure(path.join(tempDir, 'repo')); 80 | 81 | const created = await generator.generateDocumentation(repoStructure, outputDir); 82 | 83 | expect(created).toBe(DOCUMENT_GUIDES.length + 1); 84 | 85 | const docsDir = path.join(outputDir, 'docs'); 86 | const files = (await fs.readdir(docsDir)).sort(); 87 | const expectedFiles = ['README.md', ...DOCUMENT_GUIDES.map(guide => guide.file)].sort(); 88 | expect(files).toEqual(expectedFiles); 89 | 90 | const indexContent = await fs.readFile(path.join(docsDir, 'README.md'), 'utf8'); 91 | expect(indexContent).toContain('<!-- agent-update:start:docs-index -->'); 92 | expect(indexContent).toContain('# Documentation Index'); 93 | 94 | const overviewContent = await fs.readFile(path.join(docsDir, 'project-overview.md'), 'utf8'); 95 | expect(overviewContent).toContain('<!-- agent-update:start:project-overview -->'); 96 | expect(overviewContent).toContain('# Project Overview'); 97 | expect(overviewContent).toContain('Root path:'); 98 | }); 99 | 100 | it('respects explicit guide selection', async () => { 101 | const repoStructure = createRepoStructure(path.join(tempDir, 'repo')); 102 | const selected = ['project-overview', 'glossary']; 103 | 104 | const created = await generator.generateDocumentation( 105 | repoStructure, 106 | outputDir, 107 | { selectedDocs: selected } 108 | ); 109 | 110 | expect(created).toBe(selected.length + 1); 111 | 112 | const docsDir = path.join(outputDir, 'docs'); 113 | const files = (await fs.readdir(docsDir)).sort(); 114 | expect(files).toEqual(['README.md', 'glossary.md', 'project-overview.md']); 115 | }); 116 | 117 | it('creates AGENTS.md when missing using the default template', async () => { 118 | const repoStructure = createRepoStructure(path.join(tempDir, 'repo')); 119 | 120 | await generator.generateDocumentation(repoStructure, outputDir); 121 | 122 | const 
agentsPath = path.join(repoStructure.rootPath, 'AGENTS.md'); 123 | const content = await fs.readFile(agentsPath, 'utf8'); 124 | 125 | expect(content).toContain('# AGENTS.md'); 126 | expect(content).toContain('## Dev environment tips'); 127 | expect(content).toContain('`.context/agents/README.md`'); 128 | }); 129 | 130 | it('adds AI context references to AGENTS.md when present', async () => { 131 | const repoPath = path.join(tempDir, 'repo'); 132 | const agentsPath = path.join(repoPath, 'AGENTS.md'); 133 | await fs.outputFile(agentsPath, '# Agent Guide\n\nExisting content.\n'); 134 | const repoStructure = createRepoStructure(repoPath); 135 | 136 | await generator.generateDocumentation(repoStructure, outputDir); 137 | 138 | const updatedAgents = await fs.readFile(agentsPath, 'utf8'); 139 | expect(updatedAgents).toContain('## AI Context References'); 140 | expect(updatedAgents).toContain('`.context/docs/README.md`'); 141 | expect(updatedAgents).toContain('`.context/agents/README.md`'); 142 | }); 143 | }); 144 | -------------------------------------------------------------------------------- /src/prompts/defaults.ts: -------------------------------------------------------------------------------- 1 | export const UPDATE_SCAFFOLD_PROMPT_FALLBACK = `# Prompt: Update Repository Documentation and Agent Playbooks 2 | 3 | ## Purpose 4 | You are an AI assistant responsible for refreshing the documentation (\`docs/\`) and agent playbooks (\`agents/\`) generated by the ai-context scaffolding tool. Your goal is to bring every guide up to date with the latest repository state, fill in placeholder sections, and maintain cross-references between docs and agent instructions. 5 | 6 | ## Context Gathering Checklist 7 | 1. Run \`git status -sb\` to understand pending changes. 8 | 2. Review the latest merged commits or PRs related to documentation, architecture, workflow, or testing. 9 | 3. 
Inspect \`package.json\`, CI configuration, and any release or roadmap notes stored in the repository. 10 | 4. Check \`docs/README.md\` for the current document map and update AI markers (\`agent-update:*\`). 11 | 5. Identify unresolved placeholders marked as \`<!-- agent-fill:* -->\`. 12 | 13 | ## Update Procedure 14 | 1. **Select a Guide** 15 | - Navigate to \`docs/<guide>.md\`. 16 | - Read the YAML front matter (\`ai_update_goal\`, \`required_inputs\`, \`success_criteria\`) and ensure you collect the listed inputs before editing. 17 | 18 | 2. **Edit Within Update Wrappers** 19 | - Update content strictly inside the matching \`<!-- agent-update:start:... -->\` block and keep the closing \`<!-- agent-update:end -->\` tag. 20 | - Remove or replace any \`TODO\` text with accurate, current information. 21 | - When you complete a placeholder slot (\`<!-- agent-fill:... -->\`), remove the wrapper and provide the finalized description. 22 | 23 | 3. **Cross-Link Updates** 24 | - Verify that links between docs remain valid. 25 | - If you add new guides or sections, update \`docs/README.md\` and the document map table. 26 | 27 | 4. **Agent Playbook Alignment** 28 | - For each change in \`docs/\`, adjust the related \`agents/*.md\` playbooks. 29 | - Ensure the "Documentation Touchpoints" list references the correct \`agent-update\` markers. 30 | - Update collaboration checklists and evidence sections to reflect the latest workflows. 31 | 32 | 5. **Evidence & Traceability** 33 | - Note key sources (commit hashes, issues, ADRs) in the "Evidence to Capture" or "AI Update Checklist" summary lines. 34 | - If anything is ambiguous or requires human follow-up, leave a concise note clearly labelled for maintainers. 35 | 36 | ## Acceptance Criteria 37 | - Every guide’s \`success_criteria\` field is satisfied. 38 | - No unresolved \`TODO\` or \`agent-fill\` blocks remain unless they require explicit human input; in such cases, add a comment explaining the dependency. 
39 | - Agent playbooks list accurate responsibilities, best practices, and pointer links to the refreshed docs. 40 | - Changes are self-contained, well-formatted Markdown, and reference any new external resources introduced. 41 | 42 | ## Deliverables 43 | - Updated Markdown files committed to the repository. 44 | - A short changelog or PR description summarizing: 45 | - Guides touched 46 | - Key decisions or facts added 47 | - Evidence links and any pending follow-ups 48 | `; 49 | 50 | export const UPDATE_PLAN_PROMPT_FALLBACK = `# Prompt: Update Collaboration Plans 51 | 52 | ## Purpose 53 | You are an AI assistant responsible for refining collaboration plans that live in the \`.context/plans/\` directory. Each plan orchestrates work across documentation guides (\`docs/\`) and agent playbooks (\`agents/\`). Your goal is to replace placeholders with actionable guidance that keeps the plan aligned with the referenced docs, agents, and repository context. 54 | 55 | ## Preparation Checklist 56 | 1. Review the plan’s YAML front matter to understand the stated \`ai_update_goal\`, \`required_inputs\`, and \`success_criteria\`. 57 | 2. Inspect the provided documentation excerpts (from \`docs/\`) and agent playbooks to ensure the plan reflects their current guidance. 58 | 3. Confirm that the “Agent Lineup” and “Documentation Touchpoints” tables link to real files and reference the correct \`agent-update\` markers. 59 | 4. Note any TODOs, \`agent-fill\` placeholders, or missing evidence sections that must be resolved. 60 | 61 | ## Update Procedure 62 | 1. **Task Snapshot** 63 | - Summarize the primary goal and success signal in concrete terms. 64 | - List authoritative references (docs, issues, specs) that contributors should consult. 65 | 66 | 2. **Agent Alignment** 67 | - For each agent in the lineup, describe why they are involved and call out the first responsibility they should focus on. 
68 | - Ensure playbook links and responsibility summaries match the referenced agent files. 69 | 70 | 3. **Documentation Touchpoints** 71 | - Map each plan stage to the docs excerpts provided, highlighting which sections need to be updated during execution. 72 | - Keep the table sorted and ensure the listed \`agent-update\` markers exist. 73 | 74 | 4. **Working Phases** 75 | - Break the work into sequential phases that each include a numbered list of steps, accountable owners, deliverables, and evidence expectations. 76 | - Close every phase with an explicit Git commit checkpoint (for example, \`git commit -m "chore(plan): complete phase 1"\`) so progress is recorded in version control. 77 | - Reference documentation and agent resources that the team should consult while executing each phase. 78 | 79 | 5. **Evidence & Follow-up** 80 | - Specify the artefacts that must be captured (PR links, test runs, change logs) before the plan is considered complete. 81 | - Record any follow-up actions or decisions that require human confirmation. 82 | 83 | ## Acceptance Criteria 84 | - Every TODO or placeholder inside the plan’s \`agent-update\` block is resolved or accompanied by a clear escalation note. 85 | - Tables reference existing files and stay in sync with the docs/agent indices. 86 | - Phases provide actionable guidance, include numbered steps, and end with an explicit Git commit checkpoint. 87 | - The plan remains fully self-contained and ready for contributors to execute. 88 | 89 | ## Deliverables 90 | - Updated plan Markdown returned verbatim. 91 | - No additional commentary outside the Markdown output. 
92 | `; 93 | -------------------------------------------------------------------------------- /src/generators/agents/agentConfig.ts: -------------------------------------------------------------------------------- 1 | import { AgentType } from './agentTypes'; 2 | 3 | export const AGENT_RESPONSIBILITIES: Record<AgentType, string[]> = { 4 | 'code-reviewer': [ 5 | 'Review code changes for quality, style, and best practices', 6 | 'Identify potential bugs and security issues', 7 | 'Ensure code follows project conventions', 8 | 'Provide constructive feedback and suggestions' 9 | ], 10 | 'bug-fixer': [ 11 | 'Analyze bug reports and error messages', 12 | 'Identify root causes of issues', 13 | 'Implement targeted fixes with minimal side effects', 14 | 'Test fixes thoroughly before deployment' 15 | ], 16 | 'feature-developer': [ 17 | 'Implement new features according to specifications', 18 | 'Design clean, maintainable code architecture', 19 | 'Integrate features with existing codebase', 20 | 'Write comprehensive tests for new functionality' 21 | ], 22 | 'refactoring-specialist': [ 23 | 'Identify code smells and improvement opportunities', 24 | 'Refactor code while maintaining functionality', 25 | 'Improve code organization and structure', 26 | 'Optimize performance where applicable' 27 | ], 28 | 'test-writer': [ 29 | 'Write comprehensive unit and integration tests', 30 | 'Ensure good test coverage across the codebase', 31 | 'Create test utilities and fixtures', 32 | 'Maintain and update existing tests' 33 | ], 34 | 'documentation-writer': [ 35 | 'Create clear, comprehensive documentation', 36 | 'Update existing documentation as code changes', 37 | 'Write helpful code comments and examples', 38 | 'Maintain README and API documentation' 39 | ], 40 | 'performance-optimizer': [ 41 | 'Identify performance bottlenecks', 42 | 'Optimize code for speed and efficiency', 43 | 'Implement caching strategies', 44 | 'Monitor and improve resource usage' 45 | ], 46 | 'security-auditor': [ 
47 | 'Identify security vulnerabilities', 48 | 'Implement security best practices', 49 | 'Review dependencies for security issues', 50 | 'Ensure data protection and privacy compliance' 51 | ], 52 | 'backend-specialist': [ 53 | 'Design and implement server-side architecture', 54 | 'Create and maintain APIs and microservices', 55 | 'Optimize database queries and data models', 56 | 'Implement authentication and authorization', 57 | 'Handle server deployment and scaling' 58 | ], 59 | 'frontend-specialist': [ 60 | 'Design and implement user interfaces', 61 | 'Create responsive and accessible web applications', 62 | 'Optimize client-side performance and bundle sizes', 63 | 'Implement state management and routing', 64 | 'Ensure cross-browser compatibility' 65 | ], 66 | 'architect-specialist': [ 67 | 'Design overall system architecture and patterns', 68 | 'Define technical standards and best practices', 69 | 'Evaluate and recommend technology choices', 70 | 'Plan system scalability and maintainability', 71 | 'Create architectural documentation and diagrams' 72 | ], 73 | 'devops-specialist': [ 74 | 'Design and maintain CI/CD pipelines', 75 | 'Implement infrastructure as code', 76 | 'Configure monitoring and alerting systems', 77 | 'Manage container orchestration and deployments', 78 | 'Optimize cloud resources and cost efficiency' 79 | ], 80 | 'database-specialist': [ 81 | 'Design and optimize database schemas', 82 | 'Create and manage database migrations', 83 | 'Optimize query performance and indexing', 84 | 'Ensure data integrity and consistency', 85 | 'Implement backup and recovery strategies' 86 | ], 87 | 'mobile-specialist': [ 88 | 'Develop native and cross-platform mobile applications', 89 | 'Optimize mobile app performance and battery usage', 90 | 'Implement mobile-specific UI/UX patterns', 91 | 'Handle app store deployment and updates', 92 | 'Integrate push notifications and offline capabilities' 93 | ] 94 | }; 95 | 96 | export const AGENT_BEST_PRACTICES: 
Record<AgentType, string[]> = { 97 | 'code-reviewer': [ 98 | 'Focus on maintainability and readability', 99 | 'Consider the broader impact of changes', 100 | 'Be constructive and specific in feedback' 101 | ], 102 | 'bug-fixer': [ 103 | 'Reproduce the bug before fixing', 104 | 'Write tests to prevent regression', 105 | 'Document the fix for future reference' 106 | ], 107 | 'feature-developer': [ 108 | 'Follow existing patterns and conventions', 109 | 'Consider edge cases and error handling', 110 | 'Write tests alongside implementation' 111 | ], 112 | 'refactoring-specialist': [ 113 | 'Make small, incremental changes', 114 | 'Ensure tests pass after each refactor', 115 | 'Preserve existing functionality exactly' 116 | ], 117 | 'test-writer': [ 118 | 'Write tests that are clear and maintainable', 119 | 'Test both happy path and edge cases', 120 | 'Use descriptive test names' 121 | ], 122 | 'documentation-writer': [ 123 | 'Keep documentation up-to-date with code', 124 | 'Write from the user\'s perspective', 125 | 'Include practical examples' 126 | ], 127 | 'performance-optimizer': [ 128 | 'Measure before optimizing', 129 | 'Focus on actual bottlenecks', 130 | 'Don\'t sacrifice readability unnecessarily' 131 | ], 132 | 'security-auditor': [ 133 | 'Follow security best practices', 134 | 'Stay updated on common vulnerabilities', 135 | 'Consider the principle of least privilege' 136 | ], 137 | 'backend-specialist': [ 138 | 'Design APIs according to the specification of the project', 139 | 'Implement proper error handling and logging', 140 | 'Use appropriate design patterns and clean architecture', 141 | 'Consider scalability and performance from the start', 142 | 'Implement comprehensive testing for business logic' 143 | ], 144 | 'frontend-specialist': [ 145 | 'Follow modern frontend development patterns', 146 | 'Optimize for accessibility and user experience', 147 | 'Implement responsive design principles', 148 | 'Use component-based architecture effectively', 149 |
import { PlanTemplateContext } from './types';

/**
 * Renders a complete plan markdown document (YAML front matter + body) for a
 * single plan, ready to be written to `.context/plans/<slug>.md`.
 *
 * The output is wrapped in `agent-update` markers so AI agents can later
 * update the managed region in place. Every table and front-matter list has a
 * sensible fallback (the documentation-writer agent / the docs index) so the
 * template is valid even when no agents or docs were selected.
 */
export function renderPlanTemplate(context: PlanTemplateContext): string {
  const { title, slug, summary, agents, docs } = context;

  // YAML list items for the `related_agents` front-matter key; falls back to
  // the documentation-writer agent when no agents were supplied.
  const relatedAgents = agents.length
    ? agents.map(agent => ` - "${agent.type}"`).join('\n')
    : ' - "documentation-writer"';

  // Markdown table rows linking each agent to its playbook under ../agents/.
  const agentTableRows = agents.length
    ? agents
      .map(agent => `| ${agent.title} | TODO: Describe why this agent is involved. | [${agent.title}](../agents/${agent.type}.md) | ${agent.responsibility} |`)
      .join('\n')
    : '| Documentation Writer | TODO: Describe why this agent is involved. | [Documentation Writer](../agents/documentation-writer.md) | Create clear, comprehensive documentation |';

  // Markdown table rows for the documentation guides this plan touches,
  // including each guide's agent-update task marker.
  const docsTableRows = docs.length
    ? docs
      .map(doc => `| ${doc.title} | [${doc.file}](../docs/${doc.file}) | ${doc.marker} | ${doc.primaryInputs} |`)
      .join('\n')
    : '| Documentation Index | [README.md](../docs/README.md) | agent-update:docs-index | Current docs directory listing |';

  // NOTE: the template below is emitted byte-for-byte into generated plan
  // files; escaped backticks (\`) become literal backticks in the markdown.
  return `---
id: plan-${slug}
ai_update_goal: "Define the stages, owners, and evidence required to complete ${title}."
required_inputs:
 - "Task summary or issue link describing the goal"
 - "Relevant documentation sections from docs/README.md"
 - "Matching agent playbooks from agents/README.md"
success_criteria:
 - "Stages list clear owners, deliverables, and success signals"
 - "Plan references documentation and agent resources that exist today"
 - "Follow-up actions and evidence expectations are recorded"
related_agents:
${relatedAgents}
---

<!-- agent-update:start:plan-${slug} -->
# ${title} Plan

> ${summary?.trim() || 'TODO: Summarize the desired outcome and the problem this plan addresses.'}

## Task Snapshot
- **Primary goal:** TODO: Describe the outcome to achieve.
- **Success signal:** TODO: Define how the team will know the plan worked.
- **Key references:**
 - [Documentation Index](../docs/README.md)
 - [Agent Handbook](../agents/README.md)
 - [Plans Index](./README.md)

## Agent Lineup
| Agent | Role in this plan | Playbook | First responsibility focus |
| --- | --- | --- | --- |
${agentTableRows}

## Documentation Touchpoints
| Guide | File | Task Marker | Primary Inputs |
| --- | --- | --- | --- |
${docsTableRows}

## Risk Assessment
Identify potential blockers, dependencies, and mitigation strategies before beginning work.

### Identified Risks
| Risk | Probability | Impact | Mitigation Strategy | Owner |
| --- | --- | --- | --- | --- |
| TODO: Dependency on external team | Medium | High | Early coordination meeting, clear requirements | TODO: Name |
| TODO: Insufficient test coverage | Low | Medium | Allocate time for test writing in Phase 2 | TODO: Name |

### Dependencies
- **Internal:** TODO: List dependencies on other teams, services, or infrastructure
- **External:** TODO: List dependencies on third-party services, vendors, or partners
- **Technical:** TODO: List technical prerequisites or required upgrades

### Assumptions
- TODO: Document key assumptions being made (e.g., "Assume current API schema remains stable")
- TODO: Note what happens if assumptions prove false

## Resource Estimation

### Time Allocation
| Phase | Estimated Effort | Calendar Time | Team Size |
| --- | --- | --- | --- |
| Phase 1 - Discovery | TODO: e.g., 2 person-days | 3-5 days | 1-2 people |
| Phase 2 - Implementation | TODO: e.g., 5 person-days | 1-2 weeks | 2-3 people |
| Phase 3 - Validation | TODO: e.g., 2 person-days | 3-5 days | 1-2 people |
| **Total** | **TODO: total** | **TODO: total** | **-** |

### Required Skills
- TODO: List required expertise (e.g., "React experience", "Database optimization", "Infrastructure knowledge")
- TODO: Identify skill gaps and training needs

### Resource Availability
- **Available:** TODO: List team members and their availability
- **Blocked:** TODO: Note any team members with conflicting priorities
- **Escalation:** TODO: Name of person to contact if resources are insufficient

## Working Phases
### Phase 1 — Discovery & Alignment
**Steps**
1. TODO: Outline discovery tasks and assign the accountable owner.
2. TODO: Capture open questions that require clarification.

**Commit Checkpoint**
- After completing this phase, capture the agreed context and create a commit (for example, \`git commit -m "chore(plan): complete phase 1 discovery"\`).

### Phase 2 — Implementation & Iteration
**Steps**
1. TODO: Note build tasks, pairing expectations, and review cadence.
2. TODO: Reference docs or playbooks to keep changes aligned.

**Commit Checkpoint**
- Summarize progress, update cross-links, and create a commit documenting the outcomes of this phase (for example, \`git commit -m "chore(plan): complete phase 2 implementation"\`).

### Phase 3 — Validation & Handoff
**Steps**
1. TODO: Detail testing, verification, and documentation updates.
2. TODO: Document evidence the team must capture for maintainers.

**Commit Checkpoint**
- Record the validation evidence and create a commit signalling the handoff completion (for example, \`git commit -m "chore(plan): complete phase 3 validation"\`).

## Rollback Plan
Document how to revert changes if issues arise during or after implementation.

### Rollback Triggers
When to initiate rollback:
- Critical bugs affecting core functionality
- Performance degradation beyond acceptable thresholds
- Data integrity issues detected
- Security vulnerabilities introduced
- User-facing errors exceeding alert thresholds

### Rollback Procedures
#### Phase 1 Rollback
- Action: Discard discovery branch, restore previous documentation state
- Data Impact: None (no production changes)
- Estimated Time: < 1 hour

#### Phase 2 Rollback
- Action: TODO: Revert commits, restore database to pre-migration snapshot
- Data Impact: TODO: Describe any data loss or consistency concerns
- Estimated Time: TODO: e.g., 2-4 hours

#### Phase 3 Rollback
- Action: TODO: Full deployment rollback, restore previous version
- Data Impact: TODO: Document data synchronization requirements
- Estimated Time: TODO: e.g., 1-2 hours

### Post-Rollback Actions
1. Document reason for rollback in incident report
2. Notify stakeholders of rollback and impact
3. Schedule post-mortem to analyze failure
4. Update plan with lessons learned before retry

<!-- agent-readonly:guidance -->
## Agent Playbook Checklist
1. Pick the agent that matches your task.
2. Enrich the template with project-specific context or links.
3. Share the final prompt with your AI assistant.
4. Capture learnings in the relevant documentation file so future runs improve.

## Evidence & Follow-up
- TODO: List artifacts to collect (logs, PR links, test runs, design notes).
- TODO: Record follow-up actions or owners.

<!-- agent-update:end -->
`;
}
165 | 166 | <!-- agent-update:end --> 167 | `; 168 | } 169 | -------------------------------------------------------------------------------- /src/services/init/initService.ts: -------------------------------------------------------------------------------- 1 | import * as path from 'path'; 2 | import * as fs from 'fs-extra'; 3 | import inquirer from 'inquirer'; 4 | import chalk from 'chalk'; 5 | 6 | import { FileMapper } from '../../utils/fileMapper'; 7 | import { DocumentationGenerator } from '../../generators/documentation/documentationGenerator'; 8 | import { AgentGenerator } from '../../generators/agents/agentGenerator'; 9 | import type { CLIInterface } from '../../utils/cliUI'; 10 | import type { TranslateFn, TranslationKey } from '../../utils/i18n'; 11 | import type { RepoStructure } from '../../types'; 12 | 13 | export interface InitCommandFlags { 14 | output?: string; 15 | include?: string[]; 16 | exclude?: string[]; 17 | verbose?: boolean; 18 | docsOnly?: boolean; 19 | agentsOnly?: boolean; 20 | } 21 | 22 | export interface InitServiceDependencies { 23 | ui: CLIInterface; 24 | t: TranslateFn; 25 | version: string; 26 | documentationGenerator?: DocumentationGenerator; 27 | agentGenerator?: AgentGenerator; 28 | fileMapperFactory?: (exclude: string[] | undefined) => FileMapper; 29 | } 30 | 31 | interface InitOptions { 32 | repoPath: string; 33 | outputDir: string; 34 | include?: string[]; 35 | exclude?: string[]; 36 | verbose: boolean; 37 | scaffoldDocs: boolean; 38 | scaffoldAgents: boolean; 39 | } 40 | 41 | export class InitService { 42 | private readonly ui: CLIInterface; 43 | private readonly t: TranslateFn; 44 | private readonly version: string; 45 | private readonly documentationGenerator: DocumentationGenerator; 46 | private readonly agentGenerator: AgentGenerator; 47 | private readonly fileMapperFactory: (exclude: string[] | undefined) => FileMapper; 48 | 49 | constructor(dependencies: InitServiceDependencies) { 50 | this.ui = dependencies.ui; 51 | 
this.t = dependencies.t; 52 | this.version = dependencies.version; 53 | this.documentationGenerator = dependencies.documentationGenerator ?? new DocumentationGenerator(); 54 | this.agentGenerator = dependencies.agentGenerator ?? new AgentGenerator(); 55 | this.fileMapperFactory = dependencies.fileMapperFactory ?? ((exclude?: string[]) => new FileMapper(exclude ?? [])); 56 | } 57 | 58 | async run(repoPath: string, type: string, rawOptions: InitCommandFlags): Promise<void> { 59 | const resolvedType = resolveScaffoldType(type, rawOptions, this.t); 60 | 61 | const options: InitOptions = { 62 | repoPath: path.resolve(repoPath), 63 | outputDir: path.resolve(rawOptions.output || './.context'), 64 | include: rawOptions.include, 65 | exclude: rawOptions.exclude || [], 66 | verbose: Boolean(rawOptions.verbose), 67 | scaffoldDocs: resolvedType === 'docs' || resolvedType === 'both', 68 | scaffoldAgents: resolvedType === 'agents' || resolvedType === 'both' 69 | }; 70 | 71 | if (!options.scaffoldDocs && !options.scaffoldAgents) { 72 | this.ui.displayWarning(this.t('warnings.scaffold.noneSelected')); 73 | return; 74 | } 75 | 76 | await this.ensurePaths(options); 77 | await this.confirmOverwriteIfNeeded(options); 78 | 79 | this.ui.displayWelcome(this.version); 80 | this.ui.displayProjectInfo(options.repoPath, options.outputDir, resolvedType); 81 | 82 | const fileMapper = this.fileMapperFactory(options.exclude); 83 | 84 | this.ui.displayStep(1, 3, this.t('steps.init.analyze')); 85 | this.ui.startSpinner(this.t('spinner.repo.scanning')); 86 | 87 | const repoStructure = await fileMapper.mapRepository(options.repoPath, options.include); 88 | this.ui.updateSpinner( 89 | this.t('spinner.repo.scanComplete', { 90 | fileCount: repoStructure.totalFiles, 91 | directoryCount: repoStructure.directories.length 92 | }), 93 | 'success' 94 | ); 95 | 96 | const { docsGenerated, agentsGenerated } = await this.generateScaffolds(options, repoStructure); 97 | 98 | 
this.ui.displayGenerationSummary(docsGenerated, agentsGenerated); 99 | this.ui.displaySuccess(this.t('success.scaffold.ready', { path: chalk.cyan(options.outputDir) })); 100 | } 101 | 102 | private async confirmOverwriteIfNeeded(options: InitOptions): Promise<void> { 103 | const prompts: Array<{ key: 'docs' | 'agents'; path: string }> = []; 104 | 105 | if (options.scaffoldDocs) { 106 | const docsPath = path.join(options.outputDir, 'docs'); 107 | if (await this.directoryHasContent(docsPath)) { 108 | prompts.push({ key: 'docs', path: docsPath }); 109 | } 110 | } 111 | 112 | if (options.scaffoldAgents) { 113 | const agentsPath = path.join(options.outputDir, 'agents'); 114 | if (await this.directoryHasContent(agentsPath)) { 115 | prompts.push({ key: 'agents', path: agentsPath }); 116 | } 117 | } 118 | 119 | for (const prompt of prompts) { 120 | const questionKey: TranslationKey = prompt.key === 'docs' 121 | ? 'prompts.init.confirmOverwriteDocs' 122 | : 'prompts.init.confirmOverwriteAgents'; 123 | 124 | const answer = await inquirer.prompt<{ overwrite: boolean }>([ 125 | { 126 | type: 'confirm', 127 | name: 'overwrite', 128 | default: false, 129 | message: this.t(questionKey, { path: prompt.path }) 130 | } 131 | ]); 132 | 133 | if (!answer.overwrite) { 134 | throw new Error(this.t('errors.init.overwriteDeclined')); 135 | } 136 | } 137 | } 138 | 139 | private async directoryHasContent(dirPath: string): Promise<boolean> { 140 | const exists = await fs.pathExists(dirPath); 141 | if (!exists) { 142 | return false; 143 | } 144 | 145 | const entries = await fs.readdir(dirPath); 146 | return entries.length > 0; 147 | } 148 | 149 | private async generateScaffolds(options: InitOptions, repoStructure: RepoStructure): Promise<{ docsGenerated: number; agentsGenerated: number }> { 150 | let docsGenerated = 0; 151 | let agentsGenerated = 0; 152 | 153 | if (options.scaffoldDocs) { 154 | this.ui.displayStep(2, 3, this.t('steps.init.docs')); 155 | 
this.ui.startSpinner(this.t('spinner.docs.creating')); 156 | docsGenerated = await this.documentationGenerator.generateDocumentation( 157 | repoStructure, 158 | options.outputDir, 159 | undefined, 160 | options.verbose 161 | ); 162 | this.ui.updateSpinner(this.t('spinner.docs.created', { count: docsGenerated }), 'success'); 163 | } 164 | 165 | if (options.scaffoldAgents) { 166 | this.ui.displayStep(3, options.scaffoldDocs ? 3 : 2, this.t('steps.init.agents')); 167 | this.ui.startSpinner(this.t('spinner.agents.creating')); 168 | agentsGenerated = await this.agentGenerator.generateAgentPrompts( 169 | repoStructure, 170 | options.outputDir, 171 | undefined, 172 | options.verbose 173 | ); 174 | this.ui.updateSpinner(this.t('spinner.agents.created', { count: agentsGenerated }), 'success'); 175 | } 176 | 177 | return { docsGenerated, agentsGenerated }; 178 | } 179 | 180 | private async ensurePaths(options: InitOptions): Promise<void> { 181 | const exists = await fs.pathExists(options.repoPath); 182 | if (!exists) { 183 | throw new Error(this.t('errors.common.repoMissing', { path: options.repoPath })); 184 | } 185 | 186 | await fs.ensureDir(options.outputDir); 187 | } 188 | } 189 | 190 | export function resolveScaffoldType(type: string, rawOptions: InitCommandFlags, t: TranslateFn): 'docs' | 'agents' | 'both' { 191 | const normalized = (type || 'both').toLowerCase(); 192 | const allowed = ['docs', 'agents', 'both']; 193 | 194 | if (!allowed.includes(normalized)) { 195 | throw new Error(t('errors.init.invalidType', { value: type, allowed: allowed.join(', ') })); 196 | } 197 | 198 | if (rawOptions.docsOnly) { 199 | return 'docs'; 200 | } 201 | if (rawOptions.agentsOnly) { 202 | return 'agents'; 203 | } 204 | 205 | return normalized as 'docs' | 'agents' | 'both'; 206 | } 207 | -------------------------------------------------------------------------------- /src/generators/documentation/documentationGenerator.ts: 
-------------------------------------------------------------------------------- 1 | import * as path from 'path'; 2 | import * as fs from 'fs-extra'; 3 | import { RepoStructure } from '../../types'; 4 | import { GeneratorUtils } from '../shared'; 5 | import { 6 | DocumentationTemplateContext, 7 | GuideMeta, 8 | renderIndex, 9 | renderProjectOverview, 10 | renderArchitectureNotes, 11 | renderDevelopmentWorkflow, 12 | renderTestingStrategy, 13 | renderGlossary, 14 | renderDataFlow, 15 | renderSecurity, 16 | renderToolingGuide 17 | } from './templates'; 18 | import { getGuidesByKeys } from './guideRegistry'; 19 | 20 | interface DocSection { 21 | fileName: string; 22 | content: (context: DocumentationTemplateContext) => string; 23 | } 24 | 25 | interface DocumentationGenerationConfig { 26 | selectedDocs?: string[]; 27 | } 28 | 29 | export class DocumentationGenerator { 30 | constructor(..._legacyArgs: unknown[]) {} 31 | 32 | async generateDocumentation( 33 | repoStructure: RepoStructure, 34 | outputDir: string, 35 | config: DocumentationGenerationConfig = {}, 36 | verbose: boolean = false 37 | ): Promise<number> { 38 | const docsDir = path.join(outputDir, 'docs'); 39 | await GeneratorUtils.ensureDirectoryAndLog(docsDir, verbose, 'Generating documentation scaffold in'); 40 | 41 | const guidesToGenerate = getGuidesByKeys(config.selectedDocs); 42 | const context = this.buildContext(repoStructure, guidesToGenerate); 43 | const sections = this.getDocSections(guidesToGenerate); 44 | 45 | let created = 0; 46 | for (const section of sections) { 47 | const targetPath = path.join(docsDir, section.fileName); 48 | const content = section.content(context); 49 | await GeneratorUtils.writeFileWithLogging(targetPath, content, verbose, `Created ${section.fileName}`); 50 | created += 1; 51 | } 52 | 53 | await this.updateAgentGuideReferences(repoStructure, verbose); 54 | 55 | return created; 56 | } 57 | 58 | private buildContext( 59 | repoStructure: RepoStructure, 60 | guides: 
GuideMeta[] 61 | ): DocumentationTemplateContext { 62 | const topLevelStats = repoStructure.topLevelDirectoryStats ?? []; 63 | const topLevelDirectories = topLevelStats.length 64 | ? topLevelStats.map(stat => stat.name) 65 | : this.deriveTopLevelDirectories(repoStructure); 66 | 67 | const directoryStats = topLevelStats.length 68 | ? topLevelStats.map(stat => ({ name: stat.name, fileCount: stat.fileCount })) 69 | : topLevelDirectories.map(name => ({ 70 | name, 71 | fileCount: repoStructure.files.filter(file => file.relativePath.startsWith(`${name}/`)).length 72 | })); 73 | const primaryLanguages = GeneratorUtils.getTopFileExtensions(repoStructure, 5) 74 | .filter(([ext]) => !!ext) 75 | .map(([extension, count]) => ({ extension, count })); 76 | 77 | return { 78 | repoStructure, 79 | topLevelDirectories, 80 | primaryLanguages, 81 | directoryStats, 82 | guides 83 | }; 84 | } 85 | 86 | private deriveTopLevelDirectories(repoStructure: RepoStructure): string[] { 87 | const directorySet = new Set<string>(); 88 | repoStructure.directories.forEach(dir => { 89 | const [firstSegment] = dir.relativePath.split(/[\\/]/).filter(Boolean); 90 | if (firstSegment) { 91 | directorySet.add(firstSegment); 92 | } 93 | }); 94 | return Array.from(directorySet).sort(); 95 | } 96 | 97 | private getDocSections(guides: GuideMeta[]): DocSection[] { 98 | const sections: DocSection[] = [ 99 | { 100 | fileName: 'README.md', 101 | content: context => renderIndex(context) 102 | } 103 | ]; 104 | 105 | const renderMap: Record<string, (context: DocumentationTemplateContext) => string> = { 106 | 'project-overview': renderProjectOverview, 107 | architecture: renderArchitectureNotes, 108 | 'development-workflow': () => renderDevelopmentWorkflow(), 109 | 'testing-strategy': () => renderTestingStrategy(), 110 | glossary: renderGlossary, 111 | 'data-flow': renderDataFlow, 112 | security: () => renderSecurity(), 113 | tooling: () => renderToolingGuide() 114 | }; 115 | 116 | guides.forEach(guide => { 117 | 
const renderer = renderMap[guide.key]; 118 | if (!renderer) { 119 | return; 120 | } 121 | 122 | sections.push({ 123 | fileName: guide.file, 124 | content: renderer 125 | }); 126 | }); 127 | 128 | return sections; 129 | } 130 | 131 | private async updateAgentGuideReferences(repoStructure: RepoStructure, verbose: boolean): Promise<void> { 132 | const repoRoot = repoStructure.rootPath; 133 | const agentGuidePath = path.join(repoRoot, 'AGENTS.md'); 134 | 135 | try { 136 | const exists = await fs.pathExists(agentGuidePath); 137 | if (!exists) { 138 | const template = this.createDefaultAgentGuide(repoStructure); 139 | await fs.writeFile(agentGuidePath, template, 'utf-8'); 140 | GeneratorUtils.logProgress('Created AGENTS.md using the agents.md example starter.', verbose); 141 | return; 142 | } 143 | 144 | const content = await fs.readFile(agentGuidePath, 'utf-8'); 145 | const docsReference = '.context/docs/README.md'; 146 | const agentsReference = '.context/agents/README.md'; 147 | 148 | if (content.includes(docsReference) && content.includes(agentsReference)) { 149 | return; 150 | } 151 | 152 | const referencesBlock = `\n## AI Context References\n- Documentation index: \`${docsReference}\`\n- Agent playbooks: \`${agentsReference}\`\n`; 153 | const updatedContent = `${content.trimEnd()}${referencesBlock}\n`; 154 | 155 | await fs.writeFile(agentGuidePath, updatedContent, 'utf-8'); 156 | 157 | GeneratorUtils.logProgress('Linked AGENTS.md to generated docs and agent indexes.', verbose); 158 | } catch (error) { 159 | GeneratorUtils.logError('Failed to update AGENTS.md with documentation references', error, verbose); 160 | } 161 | } 162 | 163 | private createDefaultAgentGuide(repoStructure: RepoStructure): string { 164 | const directories = (repoStructure.topLevelDirectoryStats?.length 165 | ? 
repoStructure.topLevelDirectoryStats.map(stat => stat.name) 166 | : this.deriveTopLevelDirectories(repoStructure) 167 | ).filter(Boolean); 168 | 169 | const directorySection = directories.length 170 | ? directories 171 | .slice(0, 8) 172 | .map(dir => `- \`${dir}/\` — explain what lives here and when agents should edit it.`) 173 | .join('\n') 174 | : '- Document the major directories so agents know where to work.'; 175 | 176 | return `# AGENTS.md 177 | 178 | ## Dev environment tips 179 | - Install dependencies with \`npm install\` before running scaffolds. 180 | - Use \`npm run dev\` for the interactive TypeScript session that powers local experimentation. 181 | - Run \`npm run build\` to refresh the CommonJS bundle in \`dist/\` before shipping changes. 182 | - Store generated artefacts in \`.context/\` so reruns stay deterministic. 183 | 184 | ## Testing instructions 185 | - Execute \`npm run test\` to run the Jest suite. 186 | - Append \`-- --watch\` while iterating on a failing spec. 187 | - Trigger \`npm run build && npm run test\` before opening a PR to mimic CI. 188 | - Add or update tests alongside any generator or CLI changes. 189 | 190 | ## PR instructions 191 | - Follow Conventional Commits (for example, \`feat(scaffolding): add doc links\`). 192 | - Cross-link new scaffolds in \`docs/README.md\` and \`agents/README.md\` so future agents can find them. 193 | - Attach sample CLI output or generated markdown when behaviour shifts. 194 | - Confirm the built artefacts in \`dist/\` match the new source changes. 
195 | 196 | ## Repository map 197 | ${directorySection} 198 | 199 | ## AI Context References 200 | - Documentation index: \`.context/docs/README.md\` 201 | - Agent playbooks: \`.context/agents/README.md\` 202 | - Contributor guide: \`CONTRIBUTING.md\` 203 | `; 204 | } 205 | } 206 | -------------------------------------------------------------------------------- /src/utils/cliUI.ts: -------------------------------------------------------------------------------- 1 | import chalk from 'chalk'; 2 | import ora, { Ora } from 'ora'; 3 | import * as cliProgress from 'cli-progress'; 4 | import boxen from 'boxen'; 5 | import figures from 'figures'; 6 | 7 | import { TranslateFn, TranslationKey, TranslateParams } from './i18n'; 8 | 9 | export class CLIInterface { 10 | private spinner: Ora | null = null; 11 | private progressBar: cliProgress.SingleBar | null = null; 12 | private startTime: number = Date.now(); 13 | 14 | constructor(private readonly translate: TranslateFn) {} 15 | 16 | displayWelcome(version: string): void { 17 | const asciiArtLines = [ 18 | ' ___ _____ _____ ___________ ___________ _____ ', 19 | ' / _ \\|_ _| / __ \\ _ | _ \\ ___| ___ \\ / ___|', 20 | '/ /_\\ \\ | | | / \\/ | | | | | | |__ | |_/ /\\ `--. ', 21 | '| _ | | | | | | | | | | | | __|| / `--. 
\\', 22 | '| | | |_| |_ | \\__/\\ \\_/ / |/ /| |___| |\\ \\ /\\__/ /', 23 | '\\_| |_/\\___/ \\____/\\___/|___/ \\____/\\_| \\_|\\____/ ' 24 | ]; 25 | 26 | const palette = [ 27 | chalk.cyanBright, 28 | chalk.cyan, 29 | chalk.blueBright, 30 | chalk.blue, 31 | chalk.magentaBright, 32 | chalk.magenta 33 | ]; 34 | 35 | const banner = asciiArtLines 36 | .map((line, index) => palette[index % palette.length](` ${line}`)) 37 | .join('\n'); 38 | 39 | console.log('\n' + banner + '\n'); 40 | 41 | const nameLabel = this.t('cli.name'); 42 | const versionLabel = this.t('ui.version', { version }); 43 | const taglineLabel = this.t('cli.tagline'); 44 | 45 | const infoEntries = [ 46 | { 47 | icon: '◉', 48 | raw: nameLabel, 49 | colored: chalk.whiteBright.bold(nameLabel) 50 | }, 51 | { 52 | icon: '⌁', 53 | raw: versionLabel, 54 | colored: chalk.cyanBright(versionLabel) 55 | }, 56 | { 57 | icon: '⋰', 58 | raw: taglineLabel, 59 | colored: chalk.gray(taglineLabel) 60 | } 61 | ]; 62 | 63 | const maxWidth = infoEntries.reduce((width, entry) => Math.max(width, entry.raw.length), 0); 64 | const accent = chalk.cyanBright('╺' + '━'.repeat(maxWidth + 8) + '╸'); 65 | 66 | console.log(accent); 67 | infoEntries.forEach(entry => { 68 | const padding = ' '.repeat(Math.max(0, maxWidth - entry.raw.length)); 69 | console.log(`${chalk.cyanBright(entry.icon)} ${entry.colored}${padding}`); 70 | }); 71 | console.log(''); 72 | } 73 | 74 | displayProjectInfo(repoPath: string, outputDir: string, mode: string): void { 75 | console.log(chalk.bold(`\n${this.t('ui.projectConfiguration.title')}`)); 76 | console.log(chalk.gray('─'.repeat(50))); 77 | console.log(`${chalk.blue(figures.pointer)} ${this.t('ui.projectConfiguration.repository')} ${chalk.white(repoPath)}`); 78 | console.log(`${chalk.blue(figures.pointer)} ${this.t('ui.projectConfiguration.output')} ${chalk.white(outputDir)}`); 79 | console.log(`${chalk.blue(figures.pointer)} ${this.t('ui.projectConfiguration.mode')} ${chalk.white(mode)}`); 80 | 
console.log(chalk.gray('─'.repeat(50)) + '\n'); 81 | } 82 | 83 | startSpinner(text: string): void { 84 | this.spinner = ora({ 85 | text, 86 | spinner: 'dots', 87 | color: 'cyan' 88 | }).start(); 89 | } 90 | 91 | updateSpinner(text: string, type?: 'success' | 'fail' | 'warn' | 'info'): void { 92 | if (!this.spinner) return; 93 | 94 | switch (type) { 95 | case 'success': 96 | this.spinner.succeed(chalk.green(text)); 97 | break; 98 | case 'fail': 99 | this.spinner.fail(chalk.red(text)); 100 | break; 101 | case 'warn': 102 | this.spinner.warn(chalk.yellow(text)); 103 | break; 104 | case 'info': 105 | this.spinner.info(chalk.blue(text)); 106 | break; 107 | default: 108 | this.spinner.text = text; 109 | } 110 | } 111 | 112 | stopSpinner(success: boolean = true): void { 113 | if (!this.spinner) return; 114 | 115 | if (success) { 116 | this.spinner.stop(); 117 | } else { 118 | this.spinner.fail(); 119 | } 120 | this.spinner = null; 121 | } 122 | 123 | createProgressBar(total: number, title: string): void { 124 | this.progressBar = new cliProgress.SingleBar({ 125 | format: `${chalk.cyan(title)} |${chalk.cyan('{bar}')}| {percentage}% | {value}/{total} | {task}`, 126 | barCompleteChar: '\u2588', 127 | barIncompleteChar: '\u2591', 128 | hideCursor: true, 129 | stopOnComplete: true, 130 | clearOnComplete: false 131 | }, cliProgress.Presets.shades_classic); 132 | 133 | this.progressBar.start(total, 0, { 134 | task: this.t('ui.progress.starting') 135 | }); 136 | } 137 | 138 | updateProgress(current: number, task: string): void { 139 | if (!this.progressBar) return; 140 | this.progressBar.update(current, { task }); 141 | } 142 | 143 | completeProgress(): void { 144 | if (!this.progressBar) return; 145 | this.progressBar.stop(); 146 | this.progressBar = null; 147 | } 148 | 149 | displayAnalysisResults(totalFiles: number, totalDirs: number, totalSize: string): void { 150 | const results = boxen( 151 | chalk.bold.green(`${this.t('ui.analysis.complete.title')}\n\n`) + 152 | 
`${chalk.blue(`${this.t('ui.analysis.files')}:`)} ${chalk.white(totalFiles.toString())}\n` + 153 | `${chalk.blue(`${this.t('ui.analysis.directories')}:`)} ${chalk.white(totalDirs.toString())}\n` + 154 | `${chalk.blue(`${this.t('ui.analysis.totalSize')}:`)} ${chalk.white(totalSize)}`, 155 | { 156 | padding: 1, 157 | borderStyle: 'round', 158 | borderColor: 'green', 159 | align: 'left' 160 | } 161 | ); 162 | console.log('\n' + results); 163 | } 164 | 165 | displayFileTypeDistribution(distribution: Map<string, number>, totalFiles: number): void { 166 | console.log(chalk.bold(`\n${this.t('ui.fileTypeDistribution.title')}`)); 167 | console.log(chalk.gray('─'.repeat(50))); 168 | 169 | const sorted = Array.from(distribution.entries()) 170 | .sort((a, b) => b[1] - a[1]) 171 | .slice(0, 10); 172 | 173 | sorted.forEach(([ext, count]) => { 174 | const percentage = ((count / totalFiles) * 100).toFixed(1); 175 | const barLength = Math.round((count / totalFiles) * 30); 176 | const bar = '█'.repeat(barLength) + '░'.repeat(30 - barLength); 177 | 178 | console.log( 179 | `${chalk.yellow(ext.padEnd(15))} ${chalk.gray(bar)} ${chalk.white(count.toString().padStart(4))} (${percentage}%)` 180 | ); 181 | }); 182 | } 183 | 184 | displayGenerationSummary(docsGenerated: number, agentsGenerated: number): void { 185 | const elapsed = ((Date.now() - this.startTime) / 1000).toFixed(1); 186 | const summaryText = chalk.bold.green(`${this.t('ui.generationSummary.title')}\n\n`) + 187 | `${chalk.blue(`${this.t('ui.generationSummary.documentation')}:`)} ${chalk.white(docsGenerated.toString())}\n` + 188 | `${chalk.blue(`${this.t('ui.generationSummary.agents')}:`)} ${chalk.white(agentsGenerated.toString())}\n` + 189 | `${chalk.blue(`${this.t('ui.generationSummary.timeElapsed')}:`)} ${chalk.white(`${elapsed}s`)}\n\n` + 190 | chalk.dim(this.t('ui.generationSummary.nextStep')); 191 | 192 | const summary = boxen(summaryText, { 193 | padding: 1, 194 | borderStyle: 'double', 195 | borderColor: 'green', 196 | 
align: 'center' 197 | }); 198 | 199 | console.log('\n' + summary); 200 | } 201 | 202 | displayError(message: string, error?: Error): void { 203 | const errorBox = boxen( 204 | chalk.bold.red(`${this.t('ui.error.title')}\n\n`) + 205 | chalk.white(message) + 206 | (error ? '\n\n' + chalk.gray(error.stack || error.message) : ''), 207 | { 208 | padding: 1, 209 | borderStyle: 'round', 210 | borderColor: 'red', 211 | align: 'left' 212 | } 213 | ); 214 | 215 | console.error('\n' + errorBox); 216 | } 217 | 218 | displayInfo(title: string, message: string): void { 219 | console.log( 220 | '\n' + chalk.bold.blue(`ℹ️ ${title}`) + '\n' + 221 | chalk.gray('─'.repeat(50)) + '\n' + 222 | chalk.white(message) + '\n' 223 | ); 224 | } 225 | 226 | displaySuccess(message: string): void { 227 | console.log(chalk.green(`${figures.tick} ${message}`)); 228 | } 229 | 230 | displayWarning(message: string): void { 231 | console.log(chalk.yellow(`${figures.warning} ${message}`)); 232 | } 233 | 234 | displayStep(step: number, total: number, description: string): void { 235 | console.log( 236 | chalk.dim(`[${step}/${total}]`) + ' ' + 237 | chalk.bold(description) 238 | ); 239 | } 240 | 241 | formatBytes(bytes: number): string { 242 | if (bytes === 0) return '0 Bytes'; 243 | const k = 1024; 244 | const sizes = ['Bytes', 'KB', 'MB', 'GB']; 245 | const i = Math.floor(Math.log(bytes) / Math.log(k)); 246 | return parseFloat((bytes / Math.pow(k, i)).toFixed(2)) + ' ' + sizes[i]; 247 | } 248 | 249 | private t(key: TranslationKey, params?: TranslateParams): string { 250 | return this.translate(key, params); 251 | } 252 | } 253 | -------------------------------------------------------------------------------- /src/generators/documentation/templates/troubleshootingTemplate.ts: -------------------------------------------------------------------------------- 1 | 2 | export function renderTroubleshooting(): string { 3 | 4 | return ` 5 | <!-- agent-update:start:troubleshooting-guide --> 6 | # Troubleshooting 
Guide 7 | 8 | **Purpose:** Enable AI agents to diagnose and resolve issues using automated diagnostics and decision trees. 9 | 10 | **Agent Protocol:** 11 | 1. Run diagnostic script to gather facts 12 | 2. Match symptoms to known patterns 13 | 3. Execute resolution if confidence high (>80%) 14 | 4. Escalate to human if uncertain or high-risk 15 | 5. Log all diagnostic steps and resolution attempts 16 | 17 | ## Agent Diagnostic Script 18 | 19 | **Agent First Action:** Always run this diagnostic script to gather system state. 20 | 21 | \`\`\`bash 22 | #!/bin/bash 23 | # Save as: scripts/diagnose.sh 24 | # Agent runs this for ANY issue before attempting fixes 25 | 26 | echo "=== System Diagnostics $(date) ===" 27 | 28 | # 1. Application Status 29 | echo -e "\\n[Application Status]" 30 | curl -sf http://localhost:3000/health && echo "✓ App responding" || echo "✗ App not responding" 31 | 32 | # 2. Recent Errors 33 | echo -e "\\n[Recent Errors - Last 50 lines]" 34 | tail -50 logs/error.log 2>/dev/null || echo "No error log found" 35 | 36 | # 3. System Resources 37 | echo -e "\\n[System Resources]" 38 | echo "CPU: $(top -bn1 | grep "Cpu(s)" | awk '{print $2}')%" 39 | echo "Memory: $(free -h | awk '/^Mem:/ {print $3 "/" $2}')" 40 | echo "Disk: $(df -h / | awk 'NR==2 {print $5 " used"}')" 41 | 42 | # 4. Critical Services 43 | echo -e "\\n[Critical Services]" 44 | nc -z localhost 5432 && echo "✓ Database reachable" || echo "✗ Database unreachable" 45 | nc -z localhost 6379 && echo "✓ Redis reachable" || echo "✗ Redis unreachable" 46 | 47 | # 5. Recent Git Changes 48 | echo -e "\\n[Recent Changes]" 49 | git log --oneline -5 50 | 51 | # 6. 
Running Processes 52 | echo -e "\\n[Node Processes]" 53 | ps aux | grep -i node | grep -v grep 54 | 55 | echo -e "\\n=== Diagnostics Complete ===" 56 | \`\`\` 57 | 58 | **Agent Usage:** 59 | \`\`\`bash 60 | chmod +x scripts/diagnose.sh 61 | ./scripts/diagnose.sh > diagnostics-$(date +%Y%m%d-%H%M%S).txt 62 | # Agent parses output to determine issue category 63 | \`\`\` 64 | 65 | ## Agent Decision Tree 66 | 67 | **Agent follows this tree based on diagnostic output:** 68 | 69 | \`\`\` 70 | App not responding? 71 | ├─ YES → Check "Application Won't Start" section 72 | └─ NO → Continue 73 | 74 | Database unreachable? 75 | ├─ YES → Check "Database Issues" section 76 | └─ NO → Continue 77 | 78 | Error log has entries? 79 | ├─ YES → Parse errors, match to "Common Issues" patterns 80 | └─ NO → Check "Performance Issues" section 81 | 82 | CPU > 80% OR Memory > 90%? 83 | ├─ YES → Check "Performance/Memory Issues" section 84 | └─ NO → Escalate (unusual state, no clear pattern) 85 | \`\`\` 86 | 87 | **Agent Pattern Matching:** 88 | \`\`\`bash 89 | # Agent extracts last error from log 90 | LAST_ERROR=$(tail -1 logs/error.log) 91 | 92 | # Agent matches against known patterns 93 | if echo "$LAST_ERROR" | grep -q "ECONNREFUSED"; then 94 | # Database connection issue 95 | ISSUE_TYPE="database_connection" 96 | CONFIDENCE="high" 97 | elif echo "$LAST_ERROR" | grep -q "EADDRINUSE"; then 98 | # Port conflict 99 | ISSUE_TYPE="port_in_use" 100 | CONFIDENCE="high" 101 | elif echo "$LAST_ERROR" | grep -q "Cannot find module"; then 102 | # Missing dependency 103 | ISSUE_TYPE="missing_dependency" 104 | CONFIDENCE="high" 105 | else 106 | # Unknown pattern 107 | ISSUE_TYPE="unknown" 108 | CONFIDENCE="low" 109 | fi 110 | 111 | # Agent decides action based on confidence 112 | if [ "$CONFIDENCE" == "high" ]; then 113 | echo "Agent attempting auto-resolution for $ISSUE_TYPE" 114 | # Execute fix from "Common Issues" section 115 | else 116 | echo "Agent escalating: unknown error pattern" 117 | # 
Create issue with diagnostic output 118 | fi 119 | \`\`\` 120 | 121 | ## Common Issues & Solutions 122 | 123 | ### Application Won't Start 124 | 125 | **Agent Auto-Resolution: YES (safe to attempt)** 126 | 127 | #### Error Pattern: "EADDRINUSE" 128 | **Agent Detection:** Log contains "EADDRINUSE" or "port already in use" 129 | 130 | **Agent Resolution:** 131 | \`\`\`bash 132 | # Agent automatically kills process and restarts 133 | PORT=\${PORT:-3000} 134 | PID=$(lsof -ti:\$PORT) 135 | if [ -n "\$PID" ]; then 136 | echo "Killing process \$PID on port \$PORT" 137 | kill -9 \$PID 138 | sleep 2 139 | fi 140 | npm start 141 | # Agent verifies: curl -f http://localhost:\$PORT/health 142 | \`\`\` 143 | 144 | #### Error Pattern: "Cannot find module" 145 | **Agent Detection:** Log contains "Cannot find module" or "MODULE_NOT_FOUND" 146 | 147 | **Agent Resolution:** 148 | \`\`\`bash 149 | # Agent reinstalls dependencies 150 | rm -rf node_modules package-lock.json 151 | npm install 152 | npm start 153 | # Agent logs: "Resolved missing module issue by reinstalling dependencies" 154 | \`\`\` 155 | 156 | #### Error Pattern: "Missing environment variable" 157 | **Agent Detection:** Log contains "undefined" for env var or "Missing required environment variable" 158 | 159 | **Agent Resolution:** 160 | \`\`\`bash 161 | # Agent checks .env.example for required vars 162 | REQUIRED_VARS=$(grep -v "^#" .env.example | cut -d= -f1) 163 | for VAR in \$REQUIRED_VARS; do 164 | if [ -z "\${!VAR}" ]; then 165 | echo "ERROR: Missing required environment variable: \$VAR" 166 | echo "Agent cannot auto-resolve. Escalating to human." 
167 | exit 1 168 | fi 169 | done 170 | \`\`\` 171 | **Agent Action:** ESCALATE (requires human to set values) 172 | 173 | --- 174 | 175 | ### Performance Issues 176 | 177 | **Agent Auto-Resolution: CONDITIONAL (depends on cause)** 178 | 179 | #### Error Pattern: High CPU Usage 180 | **Agent Detection:** CPU >80% sustained for >5 minutes 181 | 182 | **Agent Diagnostic:** 183 | \`\`\`bash 184 | # Agent identifies high CPU process 185 | top -bn1 | head -20 186 | ps aux --sort=-%cpu | head -10 187 | 188 | # Agent checks if it's the application 189 | APP_PID=$(ps aux | grep "node.*index.js" | grep -v grep | awk '{print \$2}') 190 | APP_CPU=$(ps aux | grep \$APP_PID | awk '{print \$3}') 191 | 192 | if (( \$(echo "\$APP_CPU > 80" | bc -l) )); then 193 | echo "Application consuming high CPU: \${APP_CPU}%" 194 | echo "Agent Action: Restart application" 195 | pm2 restart app || npm run restart 196 | else 197 | echo "External process consuming CPU - Agent escalating to human" 198 | fi 199 | \`\`\` 200 | 201 | --- 202 | 203 | ### Database Issues 204 | 205 | **Agent Auto-Resolution: CONDITIONAL** 206 | 207 | #### Error Pattern: "ECONNREFUSED" (Database) 208 | **Agent Detection:** Log contains "ECONNREFUSED" with database port (5432, 3306, 27017) 209 | 210 | **Agent Diagnostic:** 211 | \`\`\`bash 212 | # Check if database is running 213 | nc -z localhost 5432 && echo "DB reachable" || echo "DB not reachable" 214 | 215 | # If not reachable, attempt to start 216 | if ! nc -z localhost 5432; then 217 | echo "Agent attempting to start database..." 
218 | docker-compose up -d db || sudo systemctl start postgresql 219 | sleep 5 220 | nc -z localhost 5432 && echo "✓ Database started" || echo "✗ Failed to start - escalating" 221 | fi 222 | \`\`\` 223 | 224 | --- 225 | 226 | ## Escalation Criteria 227 | 228 | **Agent must escalate immediately when:** 229 | - Unknown error pattern (confidence <50%) 230 | - Data corruption risk (database migration failures, validation errors) 231 | - Security issue detected (exposed credentials, unauthorized access) 232 | - Multiple resolution attempts failed (tried 2+ fixes, issue persists) 233 | - Human approval required (destructive operations, production changes) 234 | 235 | **Escalation format:** 236 | \`\`\`bash 237 | # Agent creates structured escalation 238 | cat > escalation-\$(date +%Y%m%d-%H%M%S).txt <<EOF 239 | ESCALATION REQUIRED 240 | 241 | Issue: [Brief description] 242 | Confidence: [low/medium] 243 | Risk Level: [low/medium/high] 244 | 245 | Symptoms: 246 | - [List observed symptoms] 247 | 248 | Diagnostics Run: 249 | - [Commands executed] 250 | - [Output summary] 251 | 252 | Resolution Attempts: 253 | - [What agent tried] 254 | - [Results] 255 | 256 | Recommendation: 257 | - [Suggested next steps for human] 258 | 259 | Diagnostic Files: 260 | - diagnostics-*.txt 261 | - logs/error.log (last 100 lines attached below) 262 | 263 | --- 264 | \$(tail -100 logs/error.log) 265 | EOF 266 | 267 | echo "Escalation created. Agent awaiting human intervention." 268 | \`\`\` 269 | 270 | <!-- agent-readonly:guidance --> 271 | ## AI Update Checklist 272 | 1. Review recent incident reports and add new common issues 273 | 2. Update diagnostic commands to match current tooling 274 | 3. Verify contact information and escalation paths are current 275 | 4. Add workarounds for newly discovered issues 276 | 5. Update log locations and monitoring tool links 277 | 6. Validate that debugging workflows match current setup 278 | 7. 
Add new error patterns from support tickets 279 | 280 | <!-- agent-readonly:sources --> 281 | ## Acceptable Sources 282 | - Post-mortem and incident reports 283 | - Support ticket patterns and resolutions 284 | - Production logs and error tracking systems 285 | - Team knowledge base and runbooks 286 | - Infrastructure and monitoring configurations 287 | 288 | <!-- agent-update:end --> 289 | `; 290 | } -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # @ai-coders/context 2 | 3 | [![npm version](https://badge.fury.io/js/@ai-coders%2Fcontext.svg)](https://www.npmjs.com/package/@ai-coders/context) 4 | [![CI](https://github.com/vinilana/ai-coders-context/actions/workflows/ci.yml/badge.svg)](https://github.com/vinilana/ai-coders-context/actions/workflows/ci.yml) 5 | [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) 6 | 7 | <img width="663" height="192" alt="image" src="https://github.com/user-attachments/assets/4b07f61d-6800-420a-ae91-6e952cbc790d" /> 8 | 9 | 10 | A lightweight CLI that scaffolds living documentation and AI-agent playbooks for any repository—no LLMs or API keys required. The generated structure gives teams a consistent starting point for knowledge sharing while keeping everything under version control. 
11 | 12 | ## ⚙️ Requirements 13 | 14 | - Node.js 20+ (we currently test on 20, 22, 23, and 24) 15 | 16 | ## ✨ What You Get 17 | 18 | - 📚 `docs/` folder with a documentation index plus ready-to-edit guides (overview, architecture, workflow, testing) 19 | - 🤖 `agents/` folder containing playbooks for common engineering agents and a handy index 20 | - 🔁 Repeatable scaffolding that you can re-run as the project evolves 21 | - 🧭 Repository-aware templates that highlight top-level directories for quick orientation 22 | - 🧠 AI-ready front matter and `agent-update` markers so assistants know exactly what to refresh 23 | 24 | ## 📦 Installation 25 | 26 | Use `npx` to run the CLI without installing globally: 27 | 28 | ```bash 29 | npx @ai-coders/context 30 | ``` 31 | 32 | Or add it to your dev dependencies: 33 | 34 | ```bash 35 | npm install --save-dev @ai-coders/context 36 | ``` 37 | 38 | ## 🚀 Quick Start 39 | 40 | ```bash 41 | # Launch the interactive wizard 42 | npx @ai-coders/context 43 | 44 | 45 | # Scaffold docs and agents into ./.context 46 | npx @ai-coders/context init ./my-repo 47 | 48 | # Only generate docs 49 | npx @ai-coders/context init ./my-repo docs 50 | 51 | # Only generate agent playbooks, with a custom output directory 52 | npx @ai-coders/context init ./my-repo agents --output ./knowledge-base 53 | 54 | # Fill docs and agents with the repo context (preview the first 3 updates) 55 | npx @ai-coders/context fill ./my-repo --output ./.context --limit 3 56 | 57 | # Draft a collaboration plan seeded with agent and doc touchpoints 58 | npx @ai-coders/context plan release-readiness --output ./.context 59 | 60 | # Let the LLM enrich an existing plan with the latest context 61 | npx @ai-coders/context plan release-readiness --output ./.context --fill --dry-run 62 | ``` 63 | 64 | > ℹ️ The CLI pings npm for fresh releases at startup. Set `AI_CONTEXT_DISABLE_UPDATE_CHECK=true` to skip the check. 
65 | 66 | After running the command, inspect the generated structure: 67 | 68 | ``` 69 | .context/ 70 | ├── agents/ 71 | │ ├── README.md 72 | │ ├── code-reviewer.md 73 | │ └── ... 74 | └── docs/ 75 | ├── README.md 76 | ├── architecture.md 77 | └── ... 78 | ``` 79 | 80 | Customize the Markdown files to reflect your project’s specifics and commit them alongside the code. 81 | 82 | ## 🧠 Guided Updates for AI Assistants 83 | 84 | Need help filling in the scaffold? Use [`prompts/update_scaffold_prompt.md`](./prompts/update_scaffold_prompt.md) as the canonical instruction set for any LLM or CLI agent. It walks through: 85 | 86 | - Gathering repository context and locating `agent-update`/`agent-fill` markers. 87 | - Updating documentation sections while satisfying the YAML front matter criteria. 88 | - Aligning agent playbooks with the refreshed docs and recording evidence for maintainers. 89 | 90 | Share that prompt verbatim with your assistant to keep updates consistent across teams. 91 | 92 | ### Available Doc Guides & Agent Types 93 | 94 | The scaffold includes the following guides and playbooks out of the box: 95 | 96 | - Docs: `project-overview`, `architecture`, `development-workflow`, `testing-strategy`, `glossary`, `data-flow`, `security`, `tooling` 97 | - Agents: `code-reviewer`, `bug-fixer`, `feature-developer`, `refactoring-specialist`, `test-writer`, `documentation-writer`, `performance-optimizer`, `security-auditor`, `backend-specialist`, `frontend-specialist`, `architect-specialist` 98 | 99 | ### AI Marker Reference 100 | 101 | - `<!-- agent-update:start:section-id --> … <!-- agent-update:end -->` wrap the sections that AI assistants should rewrite with up-to-date project knowledge. 102 | - `<!-- agent-fill:slot-id --> … <!-- /agent-fill -->` highlight inline placeholders that must be replaced with concrete details before removing the wrapper. 
103 | - `<!-- agent-readonly:context -->` flags guidance that should remain as-is; treat the adjacent content as instructions rather than editable prose. 104 | 105 | When contributing, focus edits inside `agent-update` regions or `agent-fill` placeholders and leave `agent-readonly` guidance untouched unless you have explicit maintainer approval. 106 | 107 | ## 🛠 Commands 108 | 109 | ### `init` 110 | Scaffold documentation and/or agent playbooks. 111 | 112 | ``` 113 | Usage: ai-context init <repo-path> [type] 114 | 115 | Arguments: 116 | repo-path Path to the repository you want to scan 117 | type "docs", "agents", or "both" (default) 118 | 119 | Options: 120 | -o, --output <dir> Output directory (default: ./.context) 121 | --exclude <patterns...> Glob patterns to skip during the scan 122 | --include <patterns...> Glob patterns to explicitly include 123 | -v, --verbose Print detailed progress information 124 | -h, --help Display help for command 125 | ``` 126 | 127 | ### `fill` 128 | Use an LLM to refresh scaffolded docs and agent playbooks automatically. 129 | 130 | ``` 131 | Usage: ai-context fill <repo-path> 132 | 133 | Options: 134 | -o, --output <dir> Scaffold directory containing docs/ and agents/ (default: ./.context) 135 | -k, --api-key <key> API key for the selected LLM provider 136 | -m, --model <model> LLM model to use (default: x-ai/grok-4-fast) 137 | -p, --provider <name> Provider (openrouter only) 138 | --base-url <url> Custom base URL for OpenRouter 139 | --prompt <file> Instruction prompt to follow (optional; uses bundled instructions when omitted) 140 | --limit <number> Maximum number of files to update in one run 141 | -h, --help Display help for command 142 | ``` 143 | 144 | Under the hood, the command loads the prompt above, iterates over every Markdown file in `.context/docs` and `.context/agents`, and asks the LLM to produce the fully updated content. 
145 | 146 | ### `plan` 147 | Create a collaboration plan that links documentation guides and agent playbooks, or fill an existing plan with LLM assistance. 148 | 149 | ``` 150 | Usage: ai-context plan <plan-name> 151 | 152 | Options: 153 | -o, --output <dir> Scaffold directory containing docs/ and agents/ (default: ./.context) 154 | --title <title> Custom title for the plan document 155 | --summary <text> Seed the plan with a short summary or goal statement 156 | -f, --force Overwrite the plan if it already exists (scaffold mode) 157 | --fill Use an LLM to fill or update the plan instead of scaffolding 158 | -r, --repo <path> Repository root to summarize for additional context (fill mode) 159 | -k, --api-key <key> API key for the selected LLM provider (fill mode) 160 | -m, --model <model> LLM model to use (default: x-ai/grok-4-fast) 161 | -p, --provider <name> Provider (openrouter only) 162 | --base-url <url> Custom base URL for OpenRouter 163 | --prompt <file> Instruction prompt to follow (optional; uses bundled instructions when omitted) 164 | --dry-run Preview changes without writing files 165 | --include <patterns...> Glob patterns to include during repository analysis 166 | --exclude <patterns...> Glob patterns to exclude from repository analysis 167 | -h, --help Display help for command 168 | ``` 169 | 170 | In scaffold mode the command creates `.context/plans/<plan-name>.md`, keeps a `plans/README.md` index, and reminds contributors to consult the agent handbook before delegating work to an AI assistant. In fill mode it will scaffold the plan automatically if it does not exist, then read the plan plus its referenced docs and agent playbooks, send that context to the LLM, and either preview or write the updated Markdown. 171 | 172 | 💡 Tip: run `npx @ai-coders/context` with no arguments to enter an interactive mode that guides you through scaffold and LLM-fill options. 173 | 174 | Prefer driving the update elsewhere? 
Just grab [`prompts/update_scaffold_prompt.md`](./prompts/update_scaffold_prompt.md) and run it in your favorite playground or agent host. When you’re ready to automate, drop your API key in `.env` (for example `OPENROUTER_API_KEY` and `OPENROUTER_MODEL`) and let `fill` handle the edits inline.

## 🧰 Local Development

```bash
git clone https://github.com/vinilana/ai-coders-context.git
cd ai-coders-context
npm install
npm run build
npm run test
```

During development you can run the CLI directly against TypeScript sources:

```bash
npm run dev -- ./path/to/repo
```

## 🤝 Contributing

See [`AGENTS.md`](./AGENTS.md) for contributor guidelines, coding standards, and release tips. Pull requests are welcome!

## 📄 License

MIT © Vinícius Lana
--------------------------------------------------------------------------------
/src/generators/documentation/templates/migrationTemplate.ts:
--------------------------------------------------------------------------------

/**
 * Render the static Markdown body of the migration guide that the docs/
 * scaffold ships with. Takes no input and returns the whole document as one
 * template-literal string, including the `agent-update` and `agent-readonly`
 * markers that delimit which sections AI assistants may later edit.
 */
export function renderMigration(): string {

  return `
<!-- agent-update:start:migration-guide -->
# Migration Guide

Complete guide for upgrading between versions, handling breaking changes, and migrating data. 
9 | 10 | ## Current Version 11 | **Latest Stable:** TODO: Add current version (e.g., v2.5.0) 12 | **Release Date:** TODO: Add date 13 | 14 | ## Version Support Policy 15 | - **Current major version:** Full support with security patches and bug fixes 16 | - **Previous major version:** Security patches only for 12 months after new major release 17 | - **Older versions:** No longer supported, upgrade strongly recommended 18 | 19 | ## Before You Migrate 20 | ### Pre-Migration Checklist 21 | - [ ] **Backup all data** - Create complete backup of database and file storage 22 | - [ ] **Review changelog** - Read release notes for the target version 23 | - [ ] **Test in staging** - Run full migration in non-production environment first 24 | - [ ] **Check dependencies** - Verify all dependencies are compatible 25 | - [ ] **Notify stakeholders** - Communicate migration timeline and potential downtime 26 | - [ ] **Plan rollback** - Prepare rollback procedure in case of issues 27 | - [ ] **Schedule maintenance window** - Plan for adequate downtime if needed 28 | 29 | ### System Requirements 30 | Before upgrading, verify your environment meets the new requirements: 31 | - **Node.js version:** TODO: e.g., >= 18.0.0 32 | - **Database version:** TODO: e.g., PostgreSQL >= 14.0 33 | - **Operating System:** TODO: List supported OS versions 34 | - **Memory:** TODO: Minimum RAM requirements 35 | - **Disk space:** TODO: Required free space 36 | 37 | --- 38 | 39 | ## Migration Paths 40 | 41 | ### Upgrading to v3.0 (Latest) 42 | 43 | #### From v2.x → v3.0 44 | 45 | **Overview:** 46 | Major version upgrade with breaking changes. Migration estimated at 2-4 hours depending on data volume. 47 | 48 | **Breaking Changes:** 49 | 1. **API endpoint restructuring** 50 | - Old: \`/api/users/:id\` 51 | - New: \`/api/v3/users/:id\` 52 | - Migration: Update all API client code to use new base path 53 | 54 | 2. 
**Authentication changes** 55 | - JWT token format changed 56 | - Users must re-authenticate after upgrade 57 | - Migration: Implement token migration middleware or force re-login 58 | 59 | 3. **Database schema changes** 60 | - \`users\` table: Added \`email_verified_at\` column 61 | - \`sessions\` table: Renamed to \`user_sessions\` 62 | - Migration: Automatic via migration scripts 63 | 64 | 4. **Configuration format** 65 | - Environment variables renamed: 66 | - \`DB_HOST\` → \`DATABASE_URL\` 67 | - \`REDIS_HOST\` → \`CACHE_URL\` 68 | - Migration: Update .env file with new variable names 69 | 70 | **Step-by-Step Migration:** 71 | 72 | 1. **Create backup** 73 | \`\`\`bash 74 | # Database backup 75 | pg_dump -U postgres dbname > backup_v2.sql 76 | 77 | # Application backup 78 | tar -czf app_v2_backup.tar.gz /path/to/app 79 | \`\`\` 80 | 81 | 2. **Update dependencies** 82 | \`\`\`bash 83 | # Update package.json to v3.0 84 | npm install app-name@3.0.0 85 | 86 | # Install updated dependencies 87 | npm install 88 | \`\`\` 89 | 90 | 3. **Update configuration** 91 | \`\`\`bash 92 | # Update environment variables 93 | # Rename variables in .env file 94 | DATABASE_URL=postgresql://user:pass@localhost:5432/db 95 | CACHE_URL=redis://localhost:6379 96 | \`\`\` 97 | 98 | 4. **Run database migrations** 99 | \`\`\`bash 100 | # Dry run to preview changes 101 | npm run migrate:preview 102 | 103 | # Execute migrations 104 | npm run migrate:up 105 | 106 | # Verify migration success 107 | npm run migrate:status 108 | \`\`\` 109 | 110 | 5. **Update application code** 111 | - Update API client base URLs 112 | - Replace deprecated function calls 113 | - Update import paths if modules were reorganized 114 | 115 | 6. 
**Test the application** 116 | \`\`\`bash 117 | # Run full test suite 118 | npm test 119 | 120 | # Start application 121 | npm start 122 | 123 | # Verify critical functionality 124 | # - User authentication 125 | # - Core API endpoints 126 | # - Database operations 127 | \`\`\` 128 | 129 | 7. **Monitor after deployment** 130 | - Check error logs for unexpected issues 131 | - Monitor performance metrics 132 | - Verify data integrity 133 | 134 | **Rollback Procedure (if needed):** 135 | \`\`\`bash 136 | # Stop application 137 | npm stop 138 | 139 | # Restore database backup 140 | psql -U postgres dbname < backup_v2.sql 141 | 142 | # Revert to previous version 143 | npm install app-name@2.x.x 144 | 145 | # Restore old configuration 146 | # Revert environment variables to v2 format 147 | 148 | # Restart application 149 | npm start 150 | \`\`\` 151 | 152 | --- 153 | 154 | #### From v1.x → v3.0 155 | 156 | **Important:** Direct upgrade from v1.x to v3.0 is not supported. Please upgrade incrementally: 157 | 1. v1.x → v2.0 158 | 2. v2.0 → v3.0 159 | 160 | Refer to the sections below for each migration step. 161 | 162 | --- 163 | 164 | ### Upgrading to v2.0 165 | 166 | #### From v1.x → v2.0 167 | 168 | **Breaking Changes:** 169 | 1. **Database ORM change** 170 | - Migrated from custom ORM to Prisma 171 | - Requires regenerating database schema 172 | 173 | 2. **Configuration file format** 174 | - Moved from JSON config to environment variables 175 | - Migration: Convert config.json values to .env 176 | 177 | 3. **API response format** 178 | - Standardized error response structure 179 | - Migration: Update API client error handling 180 | 181 | **Step-by-Step Migration:** 182 | 183 | 1. **Backup and preparation** 184 | \`\`\`bash 185 | # Create backup 186 | pg_dump -U postgres dbname > backup_v1.sql 187 | 188 | # Install v2.0 189 | npm install app-name@2.0.0 190 | \`\`\` 191 | 192 | 2. 
**Configure environment** 193 | \`\`\`bash 194 | # Convert config.json to .env 195 | # Use provided migration script 196 | node scripts/config-to-env.js config.json > .env 197 | \`\`\` 198 | 199 | 3. **Migrate database schema** 200 | \`\`\`bash 201 | # Generate Prisma client 202 | npx prisma generate 203 | 204 | # Run migrations 205 | npx prisma migrate deploy 206 | \`\`\` 207 | 208 | 4. **Update code** 209 | - Replace old ORM queries with Prisma queries 210 | - Update error handling for new response format 211 | 212 | 5. **Test and deploy** 213 | 214 | --- 215 | 216 | ## Data Migration 217 | 218 | ### Database Migrations 219 | 220 | #### Running Migrations 221 | \`\`\`bash 222 | # Check current migration status 223 | npm run migrate:status 224 | 225 | # Preview migrations without applying 226 | npm run migrate:preview 227 | 228 | # Apply all pending migrations 229 | npm run migrate:up 230 | 231 | # Rollback last migration 232 | npm run migrate:down 233 | 234 | # Rollback to specific version 235 | npm run migrate:to 20240115_initial 236 | \`\`\` 237 | 238 | #### Creating Custom Migrations 239 | \`\`\`bash 240 | # Generate new migration file 241 | npm run migrate:create add_user_roles 242 | 243 | # Edit generated file at migrations/YYYYMMDD_add_user_roles.js 244 | # Implement up() and down() functions 245 | \`\`\` 246 | 247 | **Example Migration:** 248 | \`\`\`javascript 249 | // migrations/20240115_add_user_roles.js 250 | exports.up = async (db) => { 251 | await db.schema.createTable('user_roles', (table) => { 252 | table.increments('id').primary(); 253 | table.integer('user_id').references('users.id'); 254 | table.string('role').notNullable(); 255 | table.timestamps(); 256 | }); 257 | }; 258 | 259 | exports.down = async (db) => { 260 | await db.schema.dropTable('user_roles'); 261 | }; 262 | \`\`\` 263 | 264 | ### Data Transformation 265 | 266 | #### Large Dataset Migration 267 | For large datasets that can't be migrated in a single transaction: 268 | 269 | 
\`\`\`bash 270 | # Use batch migration script 271 | node scripts/migrate-data-batch.js \\ 272 | --source=old_table \\ 273 | --target=new_table \\ 274 | --batch-size=1000 \\ 275 | --delay=100 276 | 277 | # Monitor progress 278 | tail -f logs/migration.log 279 | \`\`\` 280 | 281 | #### Zero-Downtime Migration 282 | For production migrations with no downtime: 283 | 284 | 1. **Dual-write phase** 285 | - Write to both old and new schema 286 | - Read from old schema 287 | 288 | 2. **Backfill phase** 289 | - Migrate historical data in batches 290 | - Verify data integrity 291 | 292 | 3. **Dual-read phase** 293 | - Continue writing to both 294 | - Switch reads to new schema 295 | - Monitor for issues 296 | 297 | 4. **Cleanup phase** 298 | - Stop writing to old schema 299 | - Archive or drop old tables 300 | 301 | --- 302 | 303 | ## Deprecation Timeline 304 | 305 | ### Currently Deprecated (Remove in next major version) 306 | - **Function:** \`oldApiCall()\` → Use \`newApiCall()\` instead 307 | - **Endpoint:** \`/api/legacy/*\` → Use \`/api/v3/*\` instead 308 | - **Configuration:** \`LEGACY_MODE=true\` → No longer needed 309 | 310 | ### Deprecated in v2.0 (Removed in v3.0) 311 | - Legacy authentication system 312 | - Old database ORM 313 | - JSON configuration format 314 | 315 | --- 316 | 317 | ## Backward Compatibility 318 | 319 | ### Maintaining Compatibility 320 | - Feature flags to toggle between old and new behavior 321 | - Adapter layer for deprecated APIs 322 | - Automatic data format conversion where possible 323 | 324 | ### Compatibility Mode 325 | Enable compatibility mode during transition period: 326 | \`\`\`bash 327 | # .env 328 | COMPATIBILITY_MODE=true 329 | LEGACY_API_ENABLED=true 330 | \`\`\` 331 | 332 | **Warning:** Compatibility mode has performance overhead and should only be used temporarily. 333 | 334 | --- 335 | 336 | ## Common Migration Issues 337 | 338 | ### Issue: Migration Fails Midway 339 | **Solution:** 340 | 1. 
Check migration logs for specific error 341 | 2. Fix underlying issue (permissions, constraints, etc.) 342 | 3. Resume migration or rollback and retry 343 | 344 | ### Issue: Data Inconsistency After Migration 345 | **Solution:** 346 | 1. Run data validation scripts 347 | 2. Compare row counts between old and new tables 348 | 3. Use migration verification tools 349 | 4. Re-run failed batch migrations 350 | 351 | ### Issue: Performance Degradation 352 | **Solution:** 353 | 1. Rebuild database indexes: \`npm run db:reindex\` 354 | 2. Update query optimizer statistics: \`ANALYZE;\` 355 | 3. Review query plans for new schema 356 | 4. Consider adding new indexes for changed access patterns 357 | 358 | --- 359 | 360 | ## Migration Scripts & Tools 361 | 362 | ### Available Scripts 363 | \`\`\`bash 364 | # Data validation 365 | npm run validate:migration 366 | 367 | # Dry run migration (no changes) 368 | npm run migrate:dry-run 369 | 370 | # Generate migration report 371 | npm run migrate:report > migration-report.txt 372 | 373 | # Performance test after migration 374 | npm run test:performance 375 | \`\`\` 376 | 377 | ### Third-Party Tools 378 | - **DB migration tools:** Flyway, Liquibase 379 | - **Data comparison:** DbDiff, Schema Spy 380 | - **Performance testing:** Apache Bench, k6 381 | 382 | --- 383 | 384 | ## Getting Help 385 | 386 | ### Migration Support 387 | - **Documentation:** [Link to detailed migration docs] 388 | - **Support channel:** TODO: Add Slack/Discord channel 389 | - **Migration assistance:** TODO: Add contact for migration help 390 | - **Issue tracker:** TODO: Link to migration issues 391 | 392 | ### Reporting Migration Issues 393 | When reporting migration problems, include: 394 | 1. Source version and target version 395 | 2. Full error message and stack trace 396 | 3. Migration log output 397 | 4. Database type and version 398 | 5. Steps to reproduce 399 | 400 | <!-- agent-readonly:guidance --> 401 | ## AI Update Checklist 402 | 1. 
Update version numbers and release dates when new versions are published 403 | 2. Document new breaking changes with clear before/after examples 404 | 3. Add migration paths for each new major version 405 | 4. Update deprecation timeline as features are removed 406 | 5. Include actual migration scripts and examples from the codebase 407 | 6. Verify rollback procedures are tested and accurate 408 | 7. Update system requirements for new versions 409 | 410 | <!-- agent-readonly:sources --> 411 | ## Acceptable Sources 412 | - CHANGELOG.md and release notes 413 | - Migration scripts in migrations/ or db/migrate/ directory 414 | - Upgrade guides from official documentation 415 | - Post-mortem reports from previous migrations 416 | - Breaking changes documented in commit messages or PRs 417 | 418 | <!-- agent-update:end --> 419 | `; 420 | } -------------------------------------------------------------------------------- /src/services/fill/fillService.ts: -------------------------------------------------------------------------------- 1 | import * as path from 'path'; 2 | import * as fs from 'fs-extra'; 3 | import chalk from 'chalk'; 4 | import { glob } from 'glob'; 5 | 6 | import type { CLIInterface } from '../../utils/cliUI'; 7 | import type { TranslateFn } from '../../utils/i18n'; 8 | import { resolveScaffoldPrompt } from '../../utils/promptLoader'; 9 | import { FileMapper } from '../../utils/fileMapper'; 10 | import { LLMClientFactory } from '../llmClientFactory'; 11 | import type { LLMConfig, RepoStructure, UsageStats } from '../../types'; 12 | import type { BaseLLMClient } from '../baseLLMClient'; 13 | import { resolveLlmConfig } from '../shared/llmConfig'; 14 | 15 | export interface FillCommandFlags { 16 | output?: string; 17 | prompt?: string; 18 | include?: string[]; 19 | exclude?: string[]; 20 | verbose?: boolean; 21 | limit?: number; 22 | model?: string; 23 | provider?: LLMConfig['provider']; 24 | apiKey?: string; 25 | baseUrl?: string; 26 | } 27 | 28 | 
interface ResolvedFillOptions { 29 | repoPath: string; 30 | outputDir: string; 31 | docsDir: string; 32 | agentsDir: string; 33 | include?: string[]; 34 | exclude?: string[]; 35 | verbose: boolean; 36 | limit?: number; 37 | provider: LLMConfig['provider']; 38 | model: string; 39 | apiKey: string; 40 | baseUrl?: string; 41 | systemPrompt: string; 42 | } 43 | 44 | interface TargetFile { 45 | fullPath: string; 46 | relativePath: string; 47 | isAgent: boolean; 48 | content: string; 49 | } 50 | 51 | interface FillServiceDependencies { 52 | ui: CLIInterface; 53 | t: TranslateFn; 54 | version: string; 55 | defaultModel: string; 56 | fileMapperFactory?: (exclude: string[] | undefined) => FileMapper; 57 | llmClientFactory?: typeof LLMClientFactory; 58 | } 59 | 60 | export class FillService { 61 | private readonly ui: CLIInterface; 62 | private readonly t: TranslateFn; 63 | private readonly version: string; 64 | private readonly defaultModel: string; 65 | private readonly fileMapperFactory: (exclude: string[] | undefined) => FileMapper; 66 | private readonly llmClientFactory: typeof LLMClientFactory; 67 | 68 | constructor(dependencies: FillServiceDependencies) { 69 | this.ui = dependencies.ui; 70 | this.t = dependencies.t; 71 | this.version = dependencies.version; 72 | this.defaultModel = dependencies.defaultModel; 73 | this.fileMapperFactory = dependencies.fileMapperFactory ?? ((exclude?: string[]) => new FileMapper(exclude ?? [])); 74 | this.llmClientFactory = dependencies.llmClientFactory ?? 
LLMClientFactory; 75 | } 76 | 77 | async run(repoPath: string, rawOptions: FillCommandFlags): Promise<void> { 78 | const resolvedRepo = path.resolve(repoPath); 79 | const outputDir = path.resolve(rawOptions.output || './.context'); 80 | const docsDir = path.join(outputDir, 'docs'); 81 | const agentsDir = path.join(outputDir, 'agents'); 82 | 83 | await this.ensureDirectoryExists(docsDir, this.t('errors.fill.missingDocsScaffold')); 84 | await this.ensureDirectoryExists(agentsDir, this.t('errors.fill.missingAgentsScaffold')); 85 | 86 | const llmConfig = await resolveLlmConfig({ 87 | rawOptions: { 88 | provider: rawOptions.provider, 89 | model: rawOptions.model, 90 | apiKey: rawOptions.apiKey, 91 | baseUrl: rawOptions.baseUrl 92 | }, 93 | fallbackModel: this.defaultModel, 94 | t: this.t, 95 | factory: this.llmClientFactory 96 | }); 97 | 98 | const scaffoldPrompt = await resolveScaffoldPrompt( 99 | rawOptions.prompt, 100 | missingPath => this.t('errors.fill.promptMissing', { path: missingPath }) 101 | ); 102 | 103 | const options: ResolvedFillOptions = { 104 | repoPath: resolvedRepo, 105 | outputDir, 106 | docsDir, 107 | agentsDir, 108 | include: rawOptions.include, 109 | exclude: rawOptions.exclude, 110 | verbose: Boolean(rawOptions.verbose), 111 | limit: rawOptions.limit, 112 | provider: llmConfig.provider, 113 | model: llmConfig.model, 114 | apiKey: llmConfig.apiKey, 115 | baseUrl: llmConfig.baseUrl, 116 | systemPrompt: scaffoldPrompt.content 117 | }; 118 | 119 | this.displayPromptSource(scaffoldPrompt.path, scaffoldPrompt.source); 120 | 121 | this.ui.displayWelcome(this.version); 122 | this.ui.displayProjectInfo(options.repoPath, options.outputDir, `fill:${options.provider}`); 123 | 124 | const fileMapper = this.fileMapperFactory(options.exclude); 125 | this.ui.displayStep(1, 3, this.t('steps.fill.analyze')); 126 | this.ui.startSpinner(this.t('spinner.repo.scanning')); 127 | const repoStructure = await fileMapper.mapRepository(options.repoPath, options.include); 128 
| this.ui.updateSpinner( 129 | this.t('spinner.repo.scanComplete', { 130 | fileCount: repoStructure.totalFiles, 131 | directoryCount: repoStructure.directories.length 132 | }), 133 | 'success' 134 | ); 135 | 136 | const targets = await this.collectTargets(options); 137 | if (targets.length === 0) { 138 | this.ui.displayWarning(this.t('warnings.fill.noTargets')); 139 | return; 140 | } 141 | 142 | const llmClient = this.llmClientFactory.createClient({ 143 | apiKey: options.apiKey, 144 | model: options.model, 145 | provider: options.provider, 146 | baseUrl: options.baseUrl 147 | }); 148 | 149 | const contextSummary = this.buildContextSummary(repoStructure); 150 | const results: Array<{ file: string; status: 'updated' | 'skipped' | 'failed'; message?: string }> = []; 151 | 152 | this.ui.displayStep(2, 3, this.t('steps.fill.processFiles', { count: targets.length, model: options.model })); 153 | 154 | for (const target of targets) { 155 | const result = await this.processTarget(target, llmClient, options, contextSummary); 156 | results.push(result); 157 | } 158 | 159 | this.ui.displayStep(3, 3, this.t('steps.fill.summary')); 160 | this.printLlmSummary(llmClient.getUsageStats(), results); 161 | this.ui.displaySuccess(this.t('success.fill.completed')); 162 | } 163 | 164 | private async processTarget( 165 | target: TargetFile, 166 | llmClient: BaseLLMClient, 167 | options: ResolvedFillOptions, 168 | contextSummary: string 169 | ): Promise<{ file: string; status: 'updated' | 'skipped' | 'failed'; message?: string }> { 170 | this.ui.startSpinner(this.t('spinner.fill.processing', { path: target.relativePath })); 171 | 172 | try { 173 | const userPrompt = this.buildUserPrompt(target.relativePath, target.content, contextSummary, target.isAgent); 174 | const updatedContent = await llmClient.generateText(userPrompt, options.systemPrompt); 175 | 176 | if (!updatedContent || !updatedContent.trim()) { 177 | this.ui.updateSpinner(this.t('spinner.fill.noContent', { path: 
target.relativePath }), 'warn'); 178 | return { file: target.relativePath, status: 'skipped', message: this.t('messages.fill.emptyResponse') }; 179 | } 180 | 181 | await fs.writeFile(target.fullPath, this.ensureTrailingNewline(updatedContent)); 182 | this.ui.updateSpinner(this.t('spinner.fill.updated', { path: target.relativePath }), 'success'); 183 | return { file: target.relativePath, status: 'updated' }; 184 | } catch (error) { 185 | this.ui.updateSpinner(this.t('spinner.fill.failed', { path: target.relativePath }), 'fail'); 186 | return { 187 | file: target.relativePath, 188 | status: 'failed', 189 | message: error instanceof Error ? error.message : String(error) 190 | }; 191 | } 192 | } 193 | 194 | private async collectTargets(options: ResolvedFillOptions): Promise<TargetFile[]> { 195 | const docFiles = await glob('**/*.md', { cwd: options.docsDir, absolute: true }); 196 | const agentFiles = await glob('**/*.md', { cwd: options.agentsDir, absolute: true }); 197 | const candidates = [...docFiles, ...agentFiles]; 198 | 199 | const targets: TargetFile[] = []; 200 | 201 | for (const fullPath of candidates) { 202 | const content = await fs.readFile(fullPath, 'utf-8'); 203 | const isAgent = fullPath.includes(`${path.sep}agents${path.sep}`); 204 | const relativePath = path.relative(options.outputDir, fullPath); 205 | targets.push({ fullPath, relativePath, isAgent, content }); 206 | 207 | if (options.limit && targets.length >= options.limit) { 208 | break; 209 | } 210 | } 211 | 212 | return targets; 213 | } 214 | 215 | private displayPromptSource(promptPath: string | undefined, source: 'custom' | 'package' | 'builtin'): void { 216 | if (source === 'custom' && promptPath) { 217 | this.ui.displayInfo( 218 | this.t('info.prompt.title'), 219 | this.t('info.prompt.usingCustom', { path: this.displayablePath(promptPath) }) 220 | ); 221 | return; 222 | } 223 | 224 | if (source === 'package' && promptPath) { 225 | this.ui.displayInfo( 226 | this.t('info.prompt.title'), 227 | 
this.t('info.prompt.usingPackage', { path: this.displayablePath(promptPath) }) 228 | ); 229 | return; 230 | } 231 | 232 | this.ui.displayInfo(this.t('info.prompt.title'), this.t('info.prompt.usingBundled')); 233 | } 234 | 235 | private displayablePath(promptPath: string): string { 236 | const relative = path.relative(process.cwd(), promptPath); 237 | return relative || promptPath; 238 | } 239 | 240 | private async ensureDirectoryExists(dir: string, message: string): Promise<void> { 241 | const exists = await fs.pathExists(dir); 242 | if (!exists) { 243 | throw new Error(message); 244 | } 245 | } 246 | 247 | private buildContextSummary(repoStructure: RepoStructure): string { 248 | const directories = new Set<string>(); 249 | repoStructure.directories.forEach(dir => { 250 | const [first] = dir.relativePath.split(/[\\/]/).filter(Boolean); 251 | if (first) { 252 | directories.add(first); 253 | } 254 | }); 255 | 256 | const topDirs = Array.from(directories).sort().slice(0, 12); 257 | const totalSizeMb = (repoStructure.totalSize / (1024 * 1024)).toFixed(2); 258 | 259 | return [ 260 | `Top-level directories: ${topDirs.length ? topDirs.join(', ') : 'n/a'}`, 261 | `Total files scanned: ${repoStructure.totalFiles}`, 262 | `Repository size (approx.): ${totalSizeMb} MB` 263 | ].join('\n'); 264 | } 265 | 266 | private buildUserPrompt(relativePath: string, currentContent: string, contextSummary: string, isAgent: boolean): string { 267 | const guidance: string[] = [ 268 | '- Preserve YAML front matter and existing `agent-update` sections.', 269 | '- Replace TODOs and resolve `agent-fill` placeholders with concrete information.', 270 | '- Ensure success criteria in the front matter are satisfied.', 271 | '- Return only the full updated Markdown for this file.' 
272 | ]; 273 | 274 | if (isAgent) { 275 | guidance.push('- Keep agent responsibilities, best practices, and documentation touchpoints aligned with the latest docs.'); 276 | } else { 277 | guidance.push('- Maintain accurate cross-links between docs and referenced resources.'); 278 | } 279 | 280 | return [ 281 | `Target file: ${relativePath}`, 282 | 'Repository summary:', 283 | contextSummary, 284 | '', 285 | 'Guidance:', 286 | ...guidance, 287 | '', 288 | 'Current content:', 289 | '<file>', 290 | currentContent, 291 | '</file>' 292 | ].join('\n'); 293 | } 294 | 295 | private printLlmSummary( 296 | usage: UsageStats, 297 | results: Array<{ file: string; status: 'updated' | 'skipped' | 'failed'; message?: string }> 298 | ): void { 299 | const updated = results.filter(result => result.status === 'updated').length; 300 | const skipped = results.filter(result => result.status === 'skipped').length; 301 | const failed = results.filter(result => result.status === 'failed'); 302 | 303 | console.log('\n' + chalk.bold('📄 LLM Fill Summary')); 304 | console.log(chalk.gray('─'.repeat(50))); 305 | console.log(`${chalk.blue('Updated files:')} ${chalk.white(updated.toString())}`); 306 | console.log(`${chalk.blue('Skipped files:')} ${chalk.white(skipped.toString())}`); 307 | console.log(`${chalk.blue('Failures:')} ${failed.length}`); 308 | 309 | if (usage.totalCalls > 0) { 310 | console.log(chalk.gray('─'.repeat(50))); 311 | console.log(`${chalk.blue('LLM calls:')} ${usage.totalCalls}`); 312 | console.log(`${chalk.blue('Prompt tokens:')} ${usage.totalPromptTokens}`); 313 | console.log(`${chalk.blue('Completion tokens:')} ${usage.totalCompletionTokens}`); 314 | console.log(`${chalk.blue('Model:')} ${usage.model}`); 315 | } 316 | 317 | if (failed.length > 0) { 318 | console.log(chalk.gray('─'.repeat(50))); 319 | failed.forEach(item => { 320 | console.log(`${chalk.red('✖')} ${chalk.white(item.file)} — ${chalk.gray(item.message || 'Unknown error')}`); 321 | }); 322 | } 323 | } 324 | 325 
  /** Append a trailing newline when the LLM response lacks one (POSIX-style file ending). */
  private ensureTrailingNewline(content: string): string {
    return content.endsWith('\n') ? content : `${content}\n`;
  }
}
-------------------------------------------------------------------------------- /src/generators/documentation/templates/apiReferenceTemplate.ts: --------------------------------------------------------------------------------
/**
 * Render the API reference scaffold as a Markdown string.
 *
 * The body is a single template literal: `$` characters that must survive
 * into the emitted bash examples are escaped as `\$`, and inner code fences
 * as `\`\`\``. The `agent-update`/`agent-readonly` HTML comments delimit
 * sections an AI agent may or may not rewrite later.
 *
 * @returns The full Markdown content for docs/api-reference.md.
 */
export function renderApiReference(): string {
  return `<!-- agent-update:start:api-reference -->
# API Reference

**Purpose:** Enable AI agents to programmatically interact with all API endpoints.

**Agent Usage Instructions:**
- Use documented curl commands as templates for requests
- Verify expected response codes match actual responses
- Handle authentication token expiration automatically
- Parse error responses to determine retry strategy
- Log all API interactions for debugging

## Base URLs

**Agent Configuration:**
\`\`\`bash
# Set as environment variables for easier switching
export API_BASE_DEV="http://localhost:3000/api"
export API_BASE_STAGING="https://staging-api.example.com" # TODO: Update
export API_BASE_PROD="https://api.example.com" # TODO: Update

# Agent uses based on context
API_BASE=$API_BASE_STAGING # Change as needed
\`\`\`

## Authentication

**Agent Authentication Flow:**

### Step 1: Obtain Token
\`\`\`bash
# Agent executes login
TOKEN_RESPONSE=$(curl -s -X POST "\${API_BASE}/auth/login" \\
-H "Content-Type: application/json" \\
-d '{
"email": "agent@example.com",
"password": "'\${AGENT_PASSWORD}'"
}')

# Agent extracts token
TOKEN=$(echo \$TOKEN_RESPONSE | jq -r '.token')
EXPIRES_IN=$(echo \$TOKEN_RESPONSE | jq -r '.expiresIn')

# Agent stores token with expiration
echo "\$TOKEN" > .api-token
echo \$((\$(date +%s) + \$EXPIRES_IN)) > .api-token-expires

# Expected response structure 50 | # { 51 | # "token": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...", 52 | # "expiresIn": 3600, 53 | # "refreshToken": "refresh_token_here", 54 | # "user": {"id": "123", "email": "agent@example.com"} 55 | # } 56 | \`\`\` 57 | 58 | ### Step 2: Use Token in Requests 59 | \`\`\`bash 60 | # Agent includes token in all authenticated requests 61 | curl -X GET "\${API_BASE}/resources" \\ 62 | -H "Authorization: Bearer \$TOKEN" \\ 63 | -H "Content-Type: application/json" 64 | \`\`\` 65 | 66 | ### Step 3: Handle Token Expiration 67 | \`\`\`bash 68 | # Agent checks token expiration before each request 69 | function check_token() { 70 | if [ ! -f .api-token ] || [ ! -f .api-token-expires ]; then 71 | return 1 # Token missing, need to authenticate 72 | fi 73 | 74 | EXPIRES=$(cat .api-token-expires) 75 | NOW=$(date +%s) 76 | 77 | if [ $NOW -ge $EXPIRES ]; then 78 | return 1 # Token expired 79 | fi 80 | 81 | return 0 # Token valid 82 | } 83 | 84 | # Agent refresh flow (if 401 received) 85 | if ! check_token; then 86 | echo "Token expired or missing, re-authenticating..." 87 | # Re-run Step 1 88 | fi 89 | \`\`\` 90 | 91 | **Agent Error Handling:** 92 | - 401 response → Token invalid/expired, re-authenticate 93 | - 403 response → Permission denied, check agent has required role 94 | - 429 response → Rate limited, wait and retry (see Rate Limiting section) 95 | 96 | ### Authorization & Permissions 97 | - Document role-based access control (RBAC) if applicable 98 | - List permission levels and their capabilities 99 | - Explain scope-based authorization for OAuth 100 | 101 | ## API Versioning 102 | - **Current Version:** v1 103 | - **Versioning Strategy:** TODO: URL path (/v1/), header, or query parameter 104 | - **Deprecation Policy:** TODO: Document how versions are deprecated 105 | 106 | ## Rate Limiting 107 | 108 | **Agent Must Implement:** Check rate limit headers and back off when necessary. 
109 | 110 | \`\`\`bash 111 | # Agent rate limit handler 112 | function make_api_request() { 113 | local url=$1 114 | local method=$2 115 | local data=$3 116 | 117 | RESPONSE=$(curl -i -X "$method" "$url" \\ 118 | -H "Authorization: Bearer $TOKEN" \\ 119 | -H "Content-Type: application/json" \\ 120 | -d "$data" 2>&1) 121 | 122 | # Agent extracts rate limit headers 123 | LIMIT=$(echo "$RESPONSE" | grep -i "x-ratelimit-limit" | cut -d: -f2 | tr -d ' \\r') 124 | REMAINING=$(echo "$RESPONSE" | grep -i "x-ratelimit-remaining" | cut -d: -f2 | tr -d ' \\r') 125 | RESET=$(echo "$RESPONSE" | grep -i "x-ratelimit-reset" | cut -d: -f2 | tr -d ' \\r') 126 | 127 | # Agent checks if approaching limit 128 | if [ "\$REMAINING" -lt "10" ]; then 129 | NOW=$(date +%s) 130 | WAIT=\$((RESET - NOW)) 131 | echo "Rate limit low (\$REMAINING remaining), waiting \${WAIT}s until reset..." 132 | sleep \$WAIT 133 | fi 134 | 135 | # Agent handles 429 (rate limited) 136 | HTTP_CODE=$(echo "\$RESPONSE" | grep "HTTP/" | awk '{print \$2}') 137 | if [ "\$HTTP_CODE" == "429" ]; then 138 | RETRY_AFTER=$(echo "\$RESPONSE" | grep -i "retry-after" | cut -d: -f2 | tr -d ' \\r') 139 | echo "Rate limited, retrying after \${RETRY_AFTER}s..." 
140 | sleep "\$RETRY_AFTER" 141 | make_api_request "\$url" "\$method" "\$data" # Retry 142 | fi 143 | 144 | echo "$RESPONSE" 145 | } 146 | \`\`\` 147 | 148 | **Rate Limit:** TODO: e.g., 1000 requests per hour 149 | **Headers Agent Should Monitor:** 150 | - \`X-RateLimit-Limit\`: Maximum allowed 151 | - \`X-RateLimit-Remaining\`: Remaining in window 152 | - \`X-RateLimit-Reset\`: Reset time (Unix timestamp) 153 | - \`Retry-After\`: Seconds to wait (when 429 received) 154 | 155 | ## Common Headers 156 | ### Request Headers 157 | - \`Content-Type: application/json\` - Required for POST/PUT/PATCH 158 | - \`Authorization: Bearer <token>\` - Authentication token 159 | - \`X-Request-ID: <uuid>\` - Optional request tracking ID 160 | 161 | ### Response Headers 162 | - \`Content-Type: application/json\` 163 | - \`X-Request-ID: <uuid>\` - Echo of request ID for tracking 164 | - \`X-Response-Time: <ms>\` - Server processing time 165 | 166 | ## Endpoints 167 | 168 | ### Authentication Endpoints 169 | 170 | #### POST /auth/register 171 | Register a new user account. 172 | 173 | **Request:** 174 | \`\`\`json 175 | { 176 | "email": "user@example.com", 177 | "password": "securePassword123", 178 | "name": "John Doe" 179 | } 180 | \`\`\` 181 | 182 | **Response (201 Created):** 183 | \`\`\`json 184 | { 185 | "id": "user_123", 186 | "email": "user@example.com", 187 | "name": "John Doe", 188 | "createdAt": "2024-01-15T10:30:00Z" 189 | } 190 | \`\`\` 191 | 192 | **Errors:** 193 | - \`400\`: Validation error (invalid email, weak password) 194 | - \`409\`: Email already registered 195 | 196 | --- 197 | 198 | #### POST /auth/login 199 | Authenticate user and receive access token. 
200 | 201 | **Request:** 202 | \`\`\`json 203 | { 204 | "email": "user@example.com", 205 | "password": "securePassword123" 206 | } 207 | \`\`\` 208 | 209 | **Response (200 OK):** 210 | \`\`\`json 211 | { 212 | "token": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...", 213 | "refreshToken": "refresh_token_here", 214 | "expiresIn": 3600, 215 | "user": { 216 | "id": "user_123", 217 | "email": "user@example.com", 218 | "name": "John Doe" 219 | } 220 | } 221 | \`\`\` 222 | 223 | **Errors:** 224 | - \`401\`: Invalid credentials 225 | - \`429\`: Too many failed attempts 226 | 227 | --- 228 | 229 | ### Resource Endpoints 230 | 231 | #### GET /api/resources 232 | List all resources with pagination. 233 | 234 | **Query Parameters:** 235 | - \`page\` (integer, default: 1): Page number 236 | - \`limit\` (integer, default: 20, max: 100): Items per page 237 | - \`sort\` (string): Sort field (e.g., "createdAt", "-name" for desc) 238 | - \`filter\` (string): Filter criteria (implementation-specific) 239 | 240 | **Response (200 OK):** 241 | \`\`\`json 242 | { 243 | "data": [ 244 | { 245 | "id": "res_123", 246 | "name": "Resource Name", 247 | "status": "active", 248 | "createdAt": "2024-01-15T10:30:00Z", 249 | "updatedAt": "2024-01-15T10:30:00Z" 250 | } 251 | ], 252 | "pagination": { 253 | "page": 1, 254 | "limit": 20, 255 | "total": 100, 256 | "pages": 5 257 | } 258 | } 259 | \`\`\` 260 | 261 | **Errors:** 262 | - \`401\`: Unauthorized (missing or invalid token) 263 | - \`403\`: Forbidden (insufficient permissions) 264 | 265 | --- 266 | 267 | #### GET /api/resources/:id 268 | Get a specific resource by ID. 
269 | 270 | **Path Parameters:** 271 | - \`id\` (string, required): Resource identifier 272 | 273 | **Response (200 OK):** 274 | \`\`\`json 275 | { 276 | "id": "res_123", 277 | "name": "Resource Name", 278 | "description": "Detailed description", 279 | "status": "active", 280 | "metadata": { 281 | "key": "value" 282 | }, 283 | "createdAt": "2024-01-15T10:30:00Z", 284 | "updatedAt": "2024-01-15T10:30:00Z" 285 | } 286 | \`\`\` 287 | 288 | **Errors:** 289 | - \`401\`: Unauthorized 290 | - \`404\`: Resource not found 291 | 292 | --- 293 | 294 | #### POST /api/resources 295 | Create a new resource. 296 | 297 | **Request:** 298 | \`\`\`json 299 | { 300 | "name": "New Resource", 301 | "description": "Resource description", 302 | "status": "active", 303 | "metadata": { 304 | "key": "value" 305 | } 306 | } 307 | \`\`\` 308 | 309 | **Response (201 Created):** 310 | \`\`\`json 311 | { 312 | "id": "res_124", 313 | "name": "New Resource", 314 | "description": "Resource description", 315 | "status": "active", 316 | "metadata": { 317 | "key": "value" 318 | }, 319 | "createdAt": "2024-01-15T11:00:00Z", 320 | "updatedAt": "2024-01-15T11:00:00Z" 321 | } 322 | \`\`\` 323 | 324 | **Errors:** 325 | - \`400\`: Validation error 326 | - \`401\`: Unauthorized 327 | - \`403\`: Forbidden 328 | 329 | --- 330 | 331 | #### PUT /api/resources/:id 332 | Update an existing resource (full replacement). 333 | 334 | **Path Parameters:** 335 | - \`id\` (string, required): Resource identifier 336 | 337 | **Request:** 338 | \`\`\`json 339 | { 340 | "name": "Updated Resource", 341 | "description": "Updated description", 342 | "status": "inactive", 343 | "metadata": { 344 | "key": "newValue" 345 | } 346 | } 347 | \`\`\` 348 | 349 | **Response (200 OK):** 350 | Returns updated resource object. 351 | 352 | **Errors:** 353 | - \`400\`: Validation error 354 | - \`401\`: Unauthorized 355 | - \`404\`: Resource not found 356 | 357 | --- 358 | 359 | #### PATCH /api/resources/:id 360 | Partially update a resource. 
361 | 362 | **Path Parameters:** 363 | - \`id\` (string, required): Resource identifier 364 | 365 | **Request:** 366 | \`\`\`json 367 | { 368 | "status": "inactive" 369 | } 370 | \`\`\` 371 | 372 | **Response (200 OK):** 373 | Returns updated resource object. 374 | 375 | --- 376 | 377 | #### DELETE /api/resources/:id 378 | Delete a resource. 379 | 380 | **Path Parameters:** 381 | - \`id\` (string, required): Resource identifier 382 | 383 | **Response (204 No Content)** 384 | 385 | **Errors:** 386 | - \`401\`: Unauthorized 387 | - \`404\`: Resource not found 388 | - \`409\`: Conflict (resource has dependencies) 389 | 390 | --- 391 | 392 | ## Webhooks 393 | ### Registering Webhooks 394 | Document webhook registration process if applicable. 395 | 396 | ### Webhook Events 397 | - \`resource.created\` - Fired when a new resource is created 398 | - \`resource.updated\` - Fired when a resource is updated 399 | - \`resource.deleted\` - Fired when a resource is deleted 400 | 401 | ### Webhook Payload Example 402 | \`\`\`json 403 | { 404 | "event": "resource.created", 405 | "timestamp": "2024-01-15T11:00:00Z", 406 | "data": { 407 | "id": "res_124", 408 | "name": "New Resource" 409 | } 410 | } 411 | \`\`\` 412 | 413 | ### Webhook Security 414 | - Document signature verification mechanism 415 | - Explain retry logic for failed deliveries 416 | 417 | ## Error Responses 418 | ### Standard Error Format 419 | \`\`\`json 420 | { 421 | "error": { 422 | "code": "VALIDATION_ERROR", 423 | "message": "Invalid input data", 424 | "details": [ 425 | { 426 | "field": "email", 427 | "message": "Invalid email format" 428 | } 429 | ], 430 | "requestId": "req_abc123" 431 | } 432 | } 433 | \`\`\` 434 | 435 | ### HTTP Status Codes 436 | - \`200 OK\`: Successful request 437 | - \`201 Created\`: Resource created successfully 438 | - \`204 No Content\`: Successful deletion 439 | - \`400 Bad Request\`: Invalid input or validation error 440 | - \`401 Unauthorized\`: Missing or invalid authentication 
441 | - \`403 Forbidden\`: Insufficient permissions 442 | - \`404 Not Found\`: Resource not found 443 | - \`409 Conflict\`: Resource conflict (duplicate, dependencies) 444 | - \`422 Unprocessable Entity\`: Valid syntax but semantic errors 445 | - \`429 Too Many Requests\`: Rate limit exceeded 446 | - \`500 Internal Server Error\`: Server error 447 | - \`503 Service Unavailable\`: Service temporarily unavailable 448 | 449 | ### Common Error Codes 450 | - \`VALIDATION_ERROR\`: Input validation failed 451 | - \`AUTHENTICATION_REQUIRED\`: No authentication provided 452 | - \`AUTHENTICATION_FAILED\`: Invalid credentials 453 | - \`PERMISSION_DENIED\`: Insufficient permissions 454 | - \`RESOURCE_NOT_FOUND\`: Requested resource doesn't exist 455 | - \`RATE_LIMIT_EXCEEDED\`: Too many requests 456 | - \`INTERNAL_ERROR\`: Server-side error 457 | 458 | ## SDK & Client Libraries 459 | - Document available SDKs (JavaScript, Python, Ruby, etc.) 460 | - Link to SDK documentation and examples 461 | 462 | ## Postman/OpenAPI Collection 463 | - Link to Postman collection for easy testing 464 | - Link to OpenAPI/Swagger specification if available 465 | 466 | <!-- agent-readonly:guidance --> 467 | ## AI Update Checklist 468 | 1. Review route definitions and controller implementations for new or changed endpoints 469 | 2. Verify authentication flows match current implementation 470 | 3. Update rate limiting policies if thresholds changed 471 | 4. Document new error codes and response formats 472 | 5. Ensure all examples use realistic data and current API structure 473 | 6. Validate that webhook event types are current 474 | 7. 
Update SDK links and availability 475 | 476 | <!-- agent-readonly:sources --> 477 | ## Acceptable Sources 478 | - API route files and controller implementations 479 | - Authentication middleware and JWT/OAuth configurations 480 | - OpenAPI/Swagger specifications 481 | - Integration tests that demonstrate API usage 482 | - Postman collections or API testing suites 483 | 484 | <!-- agent-update:end --> 485 | `; 486 | } --------------------------------------------------------------------------------