├── images ├── start.png ├── search.png ├── burndown.png └── milestone.png ├── LICENSE ├── .claude └── commands │ └── gustav │ ├── help.md │ ├── utils │ ├── test_enhance_system.py │ ├── enhance_cli.py │ ├── velocity_cli.py │ ├── research_integrator.py │ ├── executor_cli.py │ ├── json_updater.py │ ├── dependency_analyzer.py │ └── task_inserter.py │ ├── validator.md │ ├── enhance.md │ ├── planner.md │ ├── executor.md │ └── audit.md ├── .gitignore └── README.md /images/start.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dimitritholen/gustav/HEAD/images/start.png -------------------------------------------------------------------------------- /images/search.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dimitritholen/gustav/HEAD/images/search.png -------------------------------------------------------------------------------- /images/burndown.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dimitritholen/gustav/HEAD/images/burndown.png -------------------------------------------------------------------------------- /images/milestone.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dimitritholen/gustav/HEAD/images/milestone.png -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 Dimitri Tholen 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /.claude/commands/gustav/help.md: -------------------------------------------------------------------------------- 1 | --- 2 | allowed-tools: 3 | - Read 4 | description: "Usage: /gustav:help - Display Gustav framework overview and commands" 5 | --- 6 | 7 | **OUTPUT THE FOLLOWING EXACTLY AS IS - DO NOT ADD YOUR OWN TEXT**: 8 | 9 | ``` 10 | ● 11 | ██████ ██ ██ ███████ ████████ █████ ██ ██ 12 | ██ ██ ██ ██ ██ ██ ██ ██ ██ 13 | ██ ███ ██ ██ ███████ ██ ███████ ██ ██ 14 | ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ 15 | ██████ ██████ ███████ ██ ██ ██ ████ 16 | 17 | A sprint orchestrator 18 | --------------------- 19 | 20 | Gustav turns your idea into an enterprise-grade application with protection against over-engineering, feature creep, and buggy code. 21 | 22 | ## Commands 23 | 24 | /gustav:planner - Create sprint plan from PRD 25 | /gustav:executor [task-id] - Execute tasks or specific task 26 | /gustav:enhance [description] - Add features to existing sprint 27 | /gustav:validator [milestone] - Validate milestone completion 28 | /gustav:velocity [sprint-id] - Analyze sprint performance 29 | /gustav:audit [scope] - Security compliance scan 30 | 31 | ## Quick Start 32 | 33 | 1. Create a Product Requirements Document (PRD) 34 | 2. Run: /gustav:planner 35 | 3. Run: /gustav:executor 36 | 4. Validate milestones: /gustav:validator 37 | 5. Monitor progress: /gustav:velocity 38 | 39 | Gustav uses TDD, human-in-the-loop validation, and milestone-based development for quality assurance. 40 | 41 | Peace. 42 | ``` -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Directories to ignore 2 | .tasks/ 3 | docs/# Byte-compiled / optimized / DLL files 4 | docs/*.md 5 | __pycache__/ 6 | *.py[codz] 7 | *$py.class 8 | 9 | # C extensions 10 | *.so 11 | 12 | # Distribution / packaging 13 | .Python 14 | build/ 15 | develop-eggs/ 16 | dist/ 17 | downloads/ 18 | eggs/ 19 | .eggs/ 20 | lib/ 21 | lib64/ 22 | parts/ 23 | sdist/ 24 | var/ 25 | wheels/ 26 | share/python-wheels/ 27 | *.egg-info/ 28 | .installed.cfg 29 | *.egg 30 | MANIFEST 31 | 32 | # PyInstaller 33 | # Usually these files are written by a python script from a template 34 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
35 | *.manifest 36 | *.spec 37 | 38 | # Installer logs 39 | pip-log.txt 40 | pip-delete-this-directory.txt 41 | 42 | # Unit test / coverage reports 43 | htmlcov/ 44 | .tox/ 45 | .nox/ 46 | .coverage 47 | .coverage.* 48 | .cache 49 | nosetests.xml 50 | coverage.xml 51 | *.cover 52 | *.py.cover 53 | .hypothesis/ 54 | .pytest_cache/ 55 | cover/ 56 | 57 | # Translations 58 | *.mo 59 | *.pot 60 | 61 | # Django stuff: 62 | *.log 63 | local_settings.py 64 | db.sqlite3 65 | db.sqlite3-journal 66 | 67 | # Flask stuff: 68 | instance/ 69 | .webassets-cache 70 | 71 | # Scrapy stuff: 72 | .scrapy 73 | 74 | # Sphinx documentation 75 | docs/_build/ 76 | 77 | # PyBuilder 78 | .pybuilder/ 79 | target/ 80 | 81 | # Jupyter Notebook 82 | .ipynb_checkpoints 83 | 84 | # IPython 85 | profile_default/ 86 | ipython_config.py 87 | 88 | # pyenv 89 | # For a library or package, you might want to ignore these files since the code is 90 | # intended to run in multiple environments; otherwise, check them in: 91 | # .python-version 92 | 93 | # pipenv 94 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 95 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 96 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 97 | # install all needed dependencies. 98 | #Pipfile.lock 99 | 100 | # UV 101 | # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control. 102 | # This is especially recommended for binary packages to ensure reproducibility, and is more 103 | # commonly ignored for libraries. 104 | #uv.lock 105 | 106 | # poetry 107 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 108 | # This is especially recommended for binary packages to ensure reproducibility, and is more 109 | # commonly ignored for libraries. 110 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 111 | #poetry.lock 112 | #poetry.toml 113 | 114 | # pdm 115 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 116 | # pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python. 117 | # https://pdm-project.org/en/latest/usage/project/#working-with-version-control 118 | #pdm.lock 119 | #pdm.toml 120 | .pdm-python 121 | .pdm-build/ 122 | 123 | # pixi 124 | # Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control. 125 | #pixi.lock 126 | # Pixi creates a virtual environment in the .pixi directory, just like venv module creates one 127 | # in the .venv directory. It is recommended not to include this directory in version control. 128 | .pixi 129 | 130 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 131 | __pypackages__/ 132 | 133 | # Celery stuff 134 | celerybeat-schedule 135 | celerybeat.pid 136 | 137 | # SageMath parsed files 138 | *.sage.py 139 | 140 | # Environments 141 | .env 142 | .envrc 143 | .venv 144 | env/ 145 | venv/ 146 | ENV/ 147 | env.bak/ 148 | venv.bak/ 149 | 150 | # Spyder project settings 151 | .spyderproject 152 | .spyproject 153 | 154 | # Rope project settings 155 | .ropeproject 156 | 157 | # mkdocs documentation 158 | /site 159 | 160 | # mypy 161 | .mypy_cache/ 162 | .dmypy.json 163 | dmypy.json 164 | 165 | # Pyre type checker 166 | .pyre/ 167 | 168 | # pytype static type analyzer 169 | .pytype/ 170 | 171 | # Cython debug symbols 172 | cython_debug/ 173 | 174 | # PyCharm 175 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 176 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 177 | # and can be added to the global gitignore or merged into this file. For a more nuclear 178 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 179 | #.idea/ 180 | 181 | # Abstra 182 | # Abstra is an AI-powered process automation framework. 183 | # Ignore directories containing user credentials, local state, and settings. 184 | # Learn more at https://abstra.io/docs 185 | .abstra/ 186 | 187 | # Visual Studio Code 188 | # Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore 189 | # that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore 190 | # and can be added to the global gitignore or merged into this file. However, if you prefer, 191 | # you could uncomment the following to ignore the entire vscode folder 192 | # .vscode/ 193 | 194 | # Ruff stuff: 195 | .ruff_cache/ 196 | 197 | # PyPI configuration file 198 | .pypirc 199 | 200 | # Marimo 201 | marimo/_static/ 202 | marimo/_lsp/ 203 | __marimo__/ 204 | 205 | # Streamlit 206 | .streamlit/secrets.toml 207 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ██████ ██ ██ ███████ ████████ █████ ██ ██ 2 | ██ ██ ██ ██ ██ ██ ██ ██ ██ 3 | ██ ███ ██ ██ ███████ ██ ███████ ██ ██ 4 | ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ 5 | ██████ ██████ ███████ ██ ██ ██ ████ 6 | 7 | A sprint orchestrator for Claude Code 8 | --------------------- 9 | 10 | Welcome to Gustav, named after the legendary Orchestrator Gustav Mahler. This prompt framework is designed to turn your idea into an enterprise-grade application with lots of protection against over-engineering, feature creep, hallucinations and buggy code. 11 | 12 | Gustav will pro-actively monitor progress and code quality, so you can focus on the features. 13 | 14 | **Disclaimer:** This project is in beta. If you encounter any issues, please let me know! 15 | 16 | ## Installation 17 | 18 | 1. Open a terminal 19 | 2. Navigate to the folder you wish to use Gustav in 20 | 3. Run the command below 21 | 22 | ```bash 23 | TEMP_DIR=$(mktemp -d) && git clone git@github.com:dimitritholen/gustav.git "$TEMP_DIR" && mkdir -p ./.claude && cp -r "$TEMP_DIR/.claude"/* ./.claude/ && rm -rf "$TEMP_DIR" 24 | ``` 25 | 26 | ## The Sprint Planner 27 | 28 | To start out you need a Product Requirements Document (PRD) detailing your application idea. 
You can place the document anywhere you like, as long as it is in the same project folder or one of its sub-folders. 29 | 30 | Just run: 31 | 32 | ``` 33 | /gustav:planner 34 | ``` 35 | 36 | If you need help with any of the commands, you can always run: 37 | 38 | ``` 39 | /gustav:help 40 | ``` 41 | 42 | Gustav will do a lot of research so that your application is developed using the latest technologies and the best framework(s) for your particular goal. It will also decide which 7 features are going to be in the MVP version. Don't worry, Gustav will not remove any features. Any feature that doesn't make the cut for the MVP will be safely stored in the .tasks/deferred.json file to be picked up later. 43 | 44 | ![milestone](images/start.png) 45 | 46 | ![milestone](images/search.png) 47 | *Parallel research agents* 48 | 49 | ## The Programmer 50 | 51 | Once the planner has created all the necessary files, all you need to do is run: 52 | 53 | ``` 54 | /gustav:executor [task-id] 55 | ``` 56 | 57 | You can either let Gustav pick the next task automatically, or specify a particular task ID if you want to work on something specific. 58 | 59 | Sit back and relax. Gustav will do all the heavy lifting. Your application will be built using solid best practices, such as Test-Driven Development (TDD), code quality tooling, and more. 60 | 61 | ## The Feature Enhancer 62 | 63 | Now here's where things get really interesting. Let's say you're halfway through development and suddenly think "Oh man, I totally forgot about that cool feature!" or "What if we added this awesome thing?" Don't worry, Gustav has you covered. 64 | 65 | ``` 66 | /gustav:enhance [feature-description] 67 | ``` 68 | 69 | Just describe what you want to add and Gustav will intelligently figure out where it fits in your existing sprint plan. It will research any new technologies needed, analyze dependencies, and insert the new tasks in the perfect spots without breaking your milestone flow. It's like having a smart project manager who can reshape the plan on the fly while keeping everything organized and on track. 70 | 71 | ## The QA Person 72 | 73 | Now, most other frameworks are like a big black box. They keep developing for hours on end, and you really have no idea what is going on. Not Gustav. I have designed this framework with the human in the loop as its focal point. Gustav will not develop more than 3-4 tasks per milestone. Each milestone is a point in the development journey where you can start up the application and have a look. In fact, Gustav refuses to continue until you run the milestone validator: 74 | 75 | ``` 76 | /gustav:validator [milestone-id] 77 | ``` 78 | 79 | You can validate the current milestone automatically, or specify a particular milestone ID if needed. 80 | 81 | ![milestone](images/milestone.png) 82 | 83 | The validator will run tests, perform code quality checks, check API endpoints, and visit web pages to look for error messages. 84 | 85 | ## The Project Manager 86 | 87 | Gustav also includes a burndown chart. 88 | 89 | ``` 90 | /gustav:velocity [sprint-id] 91 | ``` 92 | 93 | This will analyze the current sprint by default, or you can specify a particular sprint ID to review past performance. 94 | 95 | ![velocity](images/burndown.png) 96 | 97 | 98 | ## That Security Dude 99 | 100 | And finally, we have the security scanner: 101 | 102 | ``` 103 | /gustav:audit [scope: full|dependencies|code|config] 104 | ``` 105 | 106 | You can run a full security audit, or focus on specific areas like dependencies, code analysis, or configuration review.
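For example, to scan only the project's third-party dependencies:

```
/gustav:audit dependencies
```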
107 | 108 | This tool will check your application against a number of security compliance frameworks: 109 | 110 | - OWASP Top 10 (2024) 111 | - CWE/SANS Top 25 112 | - PCI DSS (payment systems) 113 | - GDPR (data privacy) 114 | - SOC 2 Type II 115 | - HIPAA (healthcare) 116 | - ISO 27001/27002 117 | 118 | And there you have it. This framework is in active development by me, Dimitri Tholen. It's a constantly evolving beast. I keep refining, testing, evaluating, until I have created the ultimate autonomous AI coding team inside Claude Code. 119 | 120 | Peace. 121 | 122 | [LinkedIn](https://www.linkedin.com/in/dimitri-tholen-436825231) 123 | -------------------------------------------------------------------------------- /.claude/commands/gustav/utils/test_enhance_system.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Integration test for Gustav Enhancement System 4 | 5 | Tests the complete flow from feature analysis to JSON updates. 6 | """ 7 | 8 | import sys 9 | import json 10 | from dependency_analyzer import DependencyAnalyzer, FeatureAnalysis 11 | from task_inserter import TaskInserter, InsertionPlan 12 | from json_updater import JsonUpdater 13 | from research_integrator import ResearchIntegrator 14 | 15 | def test_enhancement_flow(feature_description: str, dry_run: bool = True): 16 | """Test the complete enhancement flow""" 17 | 18 | print(f"🧪 Testing Enhancement Flow") 19 | print(f"Feature: {feature_description}") 20 | print(f"Dry Run: {dry_run}") 21 | print("=" * 60) 22 | 23 | try: 24 | # Step 1: Analyze feature 25 | print("1️⃣ Analyzing feature...") 26 | analyzer = DependencyAnalyzer() 27 | analysis = analyzer.analyze_feature(feature_description) 28 | 29 | print(f" ✅ Complexity: {analysis.complexity}") 30 | print(f" ✅ Estimated Tasks: {analysis.estimated_tasks}") 31 | print(f" ✅ New Technologies: {analysis.new_technologies}") 32 | print(f" ✅ Dependencies: {len(analysis.dependencies)}") 33 | print(f" ✅ Conflicts: {len(analysis.conflicts)}") 34 | print() 35 | 36 | # Step 2: Research analysis 37 | print("2️⃣ Analyzing research needs...") 38 | research_integrator = ResearchIntegrator() 39 | research_needs = research_integrator.analyze_research_needs(analysis.new_technologies) 40 | 41 | needs_research = [tech for tech, status in research_needs.items() 42 | if status == 'new_research_required'] 43 | print(f" ✅ Technologies needing research: {len(needs_research)}") 44 | 45 | if needs_research: 46 | queries = research_integrator.generate_research_queries( 47 | needs_research, 48 | research_integrator.existing_research 49 | ) 50 | agents = research_integrator.create_research_agents(queries) 51 | print(f" ✅ Research agents required: {len(agents)}") 52 | print() 53 | 54 | # Step 3: Find insertion options 55 | print("3️⃣ Finding insertion options...") 56 | inserter = TaskInserter() 57 | options = inserter.find_insertion_options(analysis) 58 | 59 | print(f" ✅ Insertion options found: {len(options)}") 60 | if options: 61 | best_option = options[0] 62 | print(f" ✅ Best option: {best_option.strategy.value}") 63 | print(f" ✅ Target milestone: {best_option.target_milestone_id}") 64 | print(f" ✅ Dependencies satisfied: {best_option.dependencies_satisfied}") 65 | print(f" ✅ Impact score: {best_option.impact_score}") 66 | print() 67 | 68 | # Step 4: Create insertion plan 69 | print("4️⃣ Creating insertion plan...") 70 | if options: 71 | plan = inserter.create_insertion_plan(analysis, best_option) 72 | print(f" ✅ Tasks to create: 
{len(plan.new_tasks)}") 73 | print(f" ✅ Milestones to update: {len(plan.updated_milestones)}") 74 | print(f" ✅ Structural changes: {plan.impact_summary.get('structural_changes', False)}") 75 | else: 76 | print(" ❌ No insertion options available") 77 | return False 78 | print() 79 | 80 | # Step 5: JSON updates (dry run or actual) 81 | print("5️⃣ Preparing JSON updates...") 82 | updater = JsonUpdater() 83 | 84 | if dry_run: 85 | print(" 🔍 DRY RUN - Files that would be updated:") 86 | print(f" - task_graph.json: +{len(plan.new_tasks)} tasks") 87 | print(f" - progress_tracker.json: totals and milestone updates") 88 | print(f" - guardrail_config.json: protection rules") 89 | print(f" - prd_digest.json: enhancement tracking") 90 | if analysis.new_technologies: 91 | print(f" - techstack_research.json: +{len(analysis.new_technologies)} technologies") 92 | if analysis.conflicts: 93 | print(f" - deferred.json: potential feature removals") 94 | else: 95 | print(" 🚀 Applying updates...") 96 | summary = updater.apply_enhancement(analysis, plan) 97 | print(f" ✅ Files updated: {len(summary.files_updated)}") 98 | print(f" ✅ Backup location: {summary.backup_location}") 99 | print(f" ✅ Tasks added: {len(summary.new_task_ids)}") 100 | print(f" ✅ Total tasks: {summary.total_tasks_before} → {summary.total_tasks_after}") 101 | print() 102 | 103 | # Step 6: Validation 104 | print("6️⃣ System validation...") 105 | if not dry_run: 106 | try: 107 | updater._validate_json_consistency() 108 | print(" ✅ JSON consistency validation passed") 109 | except Exception as e: 110 | print(f" ❌ Validation failed: {e}") 111 | return False 112 | else: 113 | print(" 🔍 Validation skipped (dry run)") 114 | print() 115 | 116 | print("🎉 Enhancement system test completed successfully!") 117 | return True 118 | 119 | except Exception as e: 120 | print(f"❌ Test failed with error: {e}") 121 | import traceback 122 | traceback.print_exc() 123 | return False 124 | 125 | def test_multiple_scenarios(): 126 | """Test multiple enhancement scenarios""" 127 | 128 | scenarios = [ 129 | "Add keyboard shortcut to pause/resume recording", 130 | "Add support for multiple Simplicate accounts with account switching", 131 | "Add text-to-speech responses for confirmation", 132 | "Add visual feedback with animated system tray icon", 133 | "Add command line interface for batch time entry import" 134 | ] 135 | 136 | print("🧪 Testing Multiple Enhancement Scenarios") 137 | print("=" * 60) 138 | 139 | results = [] 140 | for i, scenario in enumerate(scenarios, 1): 141 | print(f"\nScenario {i}: {scenario}") 142 | print("-" * 40) 143 | 144 | success = test_enhancement_flow(scenario, dry_run=True) 145 | results.append((scenario, success)) 146 | 147 | if not success: 148 | print(f"❌ Scenario {i} failed") 149 | break 150 | else: 151 | print(f"✅ Scenario {i} passed") 152 | 153 | print(f"\n📊 Results Summary:") 154 | for scenario, success in results: 155 | status = "✅" if success else "❌" 156 | print(f" {status} {scenario}") 157 | 158 | total_passed = sum(1 for _, success in results if success) 159 | print(f"\n🏆 {total_passed}/{len(scenarios)} scenarios passed") 160 | 161 | return total_passed == len(scenarios) 162 | 163 | def main(): 164 | """Main test function""" 165 | 166 | if len(sys.argv) < 2: 167 | print("Usage:") 168 | print(" test_enhance_system.py 'feature description' # Test single feature") 169 | print(" test_enhance_system.py --all # Test multiple scenarios") 170 | print(" test_enhance_system.py --help # Show this help") 171 | sys.exit(1) 172 | 173 | if 
sys.argv[1] == '--help': 174 | print("Usage: test_enhance_system.py 'feature description' | --all | --help"); sys.exit(0)  # show usage and exit cleanly instead of recursing into main() 175 | elif sys.argv[1] == '--all': 176 | success = test_multiple_scenarios() 177 | sys.exit(0 if success else 1) 178 | else: 179 | feature_desc = sys.argv[1] 180 | dry_run = '--apply' not in sys.argv 181 | success = test_enhancement_flow(feature_desc, dry_run=dry_run) 182 | sys.exit(0 if success else 1) 183 | 184 | if __name__ == "__main__": 185 | main() -------------------------------------------------------------------------------- /.claude/commands/gustav/validator.md: -------------------------------------------------------------------------------- 1 | --- 2 | allowed-tools: 3 | - Bash 4 | - Read 5 | - Edit 6 | - Grep 7 | - Glob 8 | description: "Usage: /gustav:validator [milestone-id] - Validate milestone completion and application state" 9 | --- 10 | 11 | Validate that a milestone has been successfully completed and the application is ready for human review. 12 | 13 | You are **Milestone Validator** — responsible for ensuring each development milestone produces a launchable, reviewable application state. 14 | 15 | ## AUTOMATED VALIDATION FEATURES 16 | 17 | **CONTINUOUS VALIDATION:** 18 | 19 | - Smoke test execution 20 | - Performance baseline comparison 21 | - Security scan integration 22 | - Accessibility validation 23 | - Cross-browser testing (if web app) 24 | 25 | **VALIDATION METRICS:** 26 | 27 | - Launch success rate: Track across milestones 28 | - Feature completion: Percentage of planned features working 29 | - Quality score: Aggregate of all quality metrics 30 | - Time to validation: Efficiency tracking 31 | 32 | ## VALIDATION WORKFLOW 33 | 34 | ### Phase 1: Application Launch Test 35 | 36 | ```yaml 37 | LAUNCH_TEST: 38 | 1. Identify application type from techstack_research.json 39 | 2. Execute appropriate launch command: 40 | - Web: npm run dev / yarn dev / python manage.py runserver 41 | - CLI: Run help command 42 | - API: Start server and check health endpoint 43 | 3. Verify application starts without errors 44 | 4. Check for console/terminal errors 45 | 5. Document launch status 46 | ``` 47 | 48 | ### Phase 2: Feature Validation 49 | 50 | ```yaml 51 | FEATURE_CHECK: 52 | 1. Read milestone validation_criteria from task_graph.json 53 | 2. For each test scenario: 54 | - Execute the test 55 | - Document result (pass/fail) 56 | - Capture screenshots if UI involved 57 | 3. Verify all critical features work 58 | 4. Test basic user flows 59 | ``` 60 | 61 | ### Phase 3: Integration Testing 62 | 63 | ```yaml 64 | INTEGRATION_TEST: 65 | 1. Run any existing test suites 66 | 2. Check database connectivity (if applicable) 67 | 3. Verify API endpoints respond 68 | 4. Test data persistence 69 | 5. Validate UI updates reflect backend changes 70 | ``` 71 | 72 | ### Phase 4: Code Quality Check 73 | 74 | **EXTENDED QUALITY CHECKS:** 75 | 76 | ```yaml 77 | ADVANCED_QUALITY: 78 | Security_Scan: Run OWASP dependency check 79 | Performance_Test: Compare against baseline 80 | Accessibility_Audit: WCAG compliance check 81 | Documentation_Coverage: Verify docs updated 82 | API_Contract_Test: Validate API specifications 83 | ``` 84 | 85 | ```yaml 86 | QUALITY_VERIFICATION: 87 | 1. Run linting (no errors allowed) 88 | 2. Run type checking (if applicable) 89 | 3. Check test coverage meets minimum 90 | 4. Verify no TODO/FIXME comments 91 | 5.
Ensure no console.log/print debug statements 92 | ``` 93 | 94 | ### Phase 5: Generate Status Report 95 | 96 | ```yaml 97 | STATUS_REPORT: 98 | Generate comprehensive milestone report including: 99 | - Milestone ID and name 100 | - Tasks completed 101 | - Features implemented 102 | - Test results 103 | - Quality metrics 104 | - Screenshots/evidence 105 | - Issues found 106 | - Recommendation (proceed/fix/rollback) 107 | ``` 108 | 109 | ### Phase 6: Update Progress Tracker 110 | 111 | ```yaml 112 | UPDATE_TRACKER: 113 | Update .tasks/progress_tracker.json with validation results: 114 | 1. Set validation_status: "passed" or "failed" 115 | 2. Clear validation_pending flag 116 | 3. Add entry to validation_history: 117 | { 118 | "milestone_id": "M1", 119 | "validated_at": "2025-08-11T10:30:00Z", 120 | "status": "passed/failed", 121 | "issues_found": [], 122 | "validator": "/milestone-validator" 123 | } 124 | 4. If passed: Set ready_for_human_review: true 125 | 5. If failed: Document issues for resolution 126 | ``` 127 | 128 | ## VALIDATION CRITERIA 129 | 130 | ### Pass Criteria 131 | 132 | - Application launches successfully ✅ 133 | - All milestone features work ✅ 134 | - No critical errors ✅ 135 | - Quality gates pass ✅ 136 | - Ready for human review ✅ 137 | 138 | ### Fail Criteria 139 | 140 | - Application won't start ❌ 141 | - Core features broken ❌ 142 | - Critical errors present ❌ 143 | - Quality gates fail ❌ 144 | - Not ready for review ❌ 145 | 146 | ## MILESTONE REPORT TEMPLATE 147 | 148 | ```markdown 149 | ## Milestone Validation Report 150 | 151 | ### Milestone: M[N] - [Name] 152 | **Date:** [timestamp] 153 | **Status:** PASS / FAIL / PARTIAL 154 | 155 | ### Tasks Completed 156 | - [x] T-F1-01: Project setup 157 | - [x] T-F1-02: Database schema 158 | - [x] T-F1-03: Landing page 159 | - [x] T-VAL-01: Validation 160 | 161 | ### Application Status 162 | - **Launches:** Yes/No 163 | - **URL/Access:** http://localhost:3000 164 | - **Console Errors:** None/List 165 | - **Build Status:** Success/Failure 166 | 167 | ### Feature Validation 168 | | Feature | Status | Notes | 169 | |---------|--------|-------| 170 | | Homepage renders | ✅ Pass | Loads in 1.2s | 171 | | Navigation works | ✅ Pass | All links functional | 172 | | Database connected | ✅ Pass | PostgreSQL connected | 173 | 174 | ### Quality Metrics 175 | - **Test Coverage:** 75% 176 | - **Linting:** 0 errors, 0 warnings 177 | - **Type Check:** Pass 178 | - **Build Time:** 3.4s 179 | 180 | ### Evidence 181 | - Screenshot: [homepage.png] 182 | - Test Output: [test-results.txt] 183 | - Console Log: [console-clean.txt] 184 | 185 | ### Issues Found 186 | 1. Minor: Logo image missing (using placeholder) 187 | 2. Minor: Loading spinner stays 1s too long 188 | 189 | ### Recommendation 190 | **✅ PROCEED TO NEXT MILESTONE** 191 | Application is stable and ready for human review. 192 | All critical features work as expected. 193 | 194 | ### Next Steps 195 | 1. Human reviews application at http://localhost:3000 196 | 2. Feedback incorporated if needed 197 | 3. 
Proceed to Milestone M[N+1] 198 | 199 | ### Rollback Point 200 | Git tag: `milestone-[N]-complete` 201 | Branch: `milestone-[N]-stable` 202 | ``` 203 | 204 | ## COMMANDS BY TECHNOLOGY 205 | 206 | ### Node.js/JavaScript 207 | 208 | ```bash 209 | npm run dev # Start dev server 210 | npm test # Run tests 211 | npm run lint # Check linting 212 | npm run type-check # TypeScript check 213 | ``` 214 | 215 | ### Python 216 | 217 | ```bash 218 | python manage.py runserver # Django 219 | flask run # Flask 220 | pytest # Run tests 221 | pylint src/ # Linting 222 | ``` 223 | 224 | ### Java 225 | 226 | ```bash 227 | mvn spring-boot:run # Spring Boot 228 | gradle bootRun # Gradle 229 | mvn test # Run tests 230 | ``` 231 | 232 | ### Go 233 | 234 | ```bash 235 | go run main.go # Run application 236 | go test ./... # Run tests 237 | golangci-lint run # Linting 238 | ``` 239 | 240 | ## INTEGRATION CAPABILITIES 241 | 242 | ### Automated Notifications 243 | 244 | ```yaml 245 | NOTIFICATION_CHANNELS: 246 | Slack: Post to #dev-milestones channel 247 | Email: Send report to stakeholders 248 | Jira: Update sprint status 249 | GitHub: Create milestone release 250 | ``` 251 | 252 | ### Screenshot Automation 253 | 254 | - Capture key UI states 255 | - Generate visual diff reports 256 | - Create demo videos for complex features 257 | 258 | ## HUMAN REVIEW TRIGGERS 259 | 260 | After validation completes: 261 | 262 | 1. **Generate notification** that milestone is ready 263 | 2. **Provide access details** (URL, credentials if needed) 264 | 3. **List what to test** (key features to verify) 265 | 4. **Wait for approval** before continuing 266 | 5. **Incorporate feedback** if changes requested 267 | 268 | ## ROLLBACK PROCEDURES 269 | 270 | If milestone fails validation: 271 | 272 | ```bash 273 | # Create rollback point 274 | git tag milestone-[N]-failed 275 | git checkout milestone-[N-1]-stable 276 | 277 | # Document failure 278 | echo "Failure reason" > .tasks/milestone-[N]-issues.md 279 | 280 | # Plan fixes 281 | Update task_graph.json with fix tasks 282 | ``` 283 | 284 | ## VALIDATION PATTERNS 285 | 286 | ### Progressive Enhancement 287 | 288 | 1. **Basic Validation**: Application launches 289 | 2. **Feature Validation**: Core features work 290 | 3. **Integration Validation**: Components work together 291 | 4. **Quality Validation**: Meets all quality gates 292 | 5. **User Validation**: Ready for human review 293 | 294 | ### Rollback Strategy 295 | 296 | ```bash 297 | # Automated rollback on failure 298 | if [ "$VALIDATION_STATUS" = "failed" ]; then 299 | git checkout milestone-$((N-1))-stable 300 | echo "Rolled back to last stable milestone" 301 | fi 302 | ``` 303 | 304 | ## COMMAND COMPOSITION 305 | 306 | Chains with: 307 | 308 | - `/gustav:planner` — Initial planning 309 | - `/gustav:executor` — Development 310 | - `/gustav:velocity` — Burndown chart 311 | - `/gustav:audit` — Security check 312 | 313 | ## PERFORMANCE METRICS 314 | 315 | - Validation time: 2-5 minutes average 316 | - Early issue detection: 85% of issues caught at milestones 317 | - Rollback frequency: <5% of milestones require rollback 318 | - Human review efficiency: 60% faster with automated reports 319 | 320 | Remember: The goal is to catch issues EARLY, not after 20+ tasks are complete. 
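## EXAMPLE LAUNCH SMOKE TEST

As a concrete reference for Phase 1, the launch test can be scripted as a small smoke check. This is a minimal sketch, assuming a Node.js web app started with `npm run dev` that exposes a `/health` endpoint on port 3000; adapt the launch command and URL to whatever techstack_research.json records for the project.

```bash
#!/usr/bin/env bash
# Minimal milestone smoke test: launch the app, poll the health endpoint, report status.
set -euo pipefail

LOG=$(mktemp)

# Start the dev server in the background and make sure it is cleaned up on exit.
npm run dev > "$LOG" 2>&1 &
APP_PID=$!
trap 'kill "$APP_PID" 2>/dev/null || true' EXIT

# Give the application a bounded window to come up.
for _ in $(seq 1 30); do
  if curl -sf http://localhost:3000/health > /dev/null; then
    echo "✅ Application launched; health endpoint responding"
    exit 0
  fi
  sleep 1
done

echo "❌ Application failed to launch within 30s"
tail -n 20 "$LOG"
exit 1
```

A non-zero exit code from a check like this maps to a FAIL status in the milestone report and should trigger the rollback procedure above.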
321 | -------------------------------------------------------------------------------- /.claude/commands/gustav/utils/enhance_cli.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | CLI Wrapper for Gustav Enhancement System 4 | 5 | Provides command-line interface for the enhance.md command to properly 6 | integrate with Gustav utility scripts for feature enhancement. 7 | """ 8 | 9 | import argparse 10 | import json 11 | import os 12 | import sys 13 | from typing import Optional 14 | 15 | from dependency_analyzer import DependencyAnalyzer, find_project_root 16 | from task_inserter import TaskInserter 17 | from json_updater import JsonUpdater 18 | 19 | 20 | def create_backup(tasks_dir: str) -> str: 21 | """Create backup and return backup directory path""" 22 | try: 23 | updater = JsonUpdater(tasks_dir) 24 | backup_dir = updater.create_backup() 25 | print(f"✅ Backup created: {backup_dir}") 26 | return backup_dir 27 | except Exception as e: 28 | print(f"❌ Backup creation failed: {e}") 29 | sys.exit(1) 30 | 31 | 32 | def get_backup_path(tasks_dir: str) -> str: 33 | """Get the path where backup would be created""" 34 | updater = JsonUpdater(tasks_dir) 35 | return updater.backup_dir 36 | 37 | 38 | def analyze_feature(feature_description: str, tasks_dir: Optional[str] = None) -> dict: 39 | """Analyze feature and return analysis as JSON""" 40 | try: 41 | analyzer = DependencyAnalyzer(tasks_dir) 42 | analysis = analyzer.analyze_feature(feature_description) 43 | 44 | # Convert to dict for JSON serialization 45 | analysis_dict = { 46 | "feature_id": analysis.feature_id, 47 | "description": analysis.description, 48 | "estimated_tasks": analysis.estimated_tasks, 49 | "complexity": analysis.complexity, 50 | "new_technologies": analysis.new_technologies, 51 | "dependencies": [ 52 | { 53 | "task_id": dep.task_id, 54 | "dependency_type": dep.dependency_type.value, 55 | "reason": dep.reason, 56 | "strength": dep.strength 57 | } for dep in analysis.dependencies 58 | ], 59 | "conflicts": analysis.conflicts 60 | } 61 | 62 | return analysis_dict 63 | 64 | except Exception as e: 65 | print(f"❌ Feature analysis failed: {e}") 66 | sys.exit(1) 67 | 68 | 69 | def show_impact_preview(feature_description: str, tasks_dir: Optional[str] = None) -> None: 70 | """Show impact preview of the enhancement""" 71 | try: 72 | analyzer = DependencyAnalyzer(tasks_dir) 73 | analysis = analyzer.analyze_feature(feature_description) 74 | 75 | inserter = TaskInserter(tasks_dir) 76 | options = inserter.find_insertion_options(analysis) 77 | 78 | if not options: 79 | print("❌ No suitable insertion options found") 80 | sys.exit(1) 81 | 82 | # Use the best option for preview 83 | best_option = options[0] 84 | plan = inserter.create_insertion_plan(analysis, best_option) 85 | 86 | print("📊 Enhancement Impact Preview:") 87 | print(f"├─ Tasks to add: {len(plan.new_tasks)}") 88 | print(f"├─ Target milestone: {best_option.target_milestone_id}") 89 | print(f"├─ Strategy: {best_option.strategy.value}") 90 | print(f"├─ Capacity after: {best_option.capacity_after}") 91 | print(f"├─ Dependencies satisfied: {'Yes' if best_option.dependencies_satisfied else 'No'}") 92 | print(f"├─ New technologies: {len(analysis.new_technologies)}") 93 | print(f"└─ Complexity: {analysis.complexity}") 94 | 95 | if analysis.conflicts: 96 | print("⚠️ Potential conflicts:") 97 | for conflict in analysis.conflicts: 98 | print(f" - {conflict}") 99 | 100 | except Exception as e: 101 | print(f"❌ Impact 
preview failed: {e}") 102 | sys.exit(1) 103 | 104 | 105 | def apply_enhancement( 106 | feature_description: str, 107 | tasks_dir: str, 108 | backup_dir: Optional[str] = None 109 | ) -> dict: 110 | """Apply enhancement and return summary""" 111 | try: 112 | # Step 1: Analyze feature 113 | analyzer = DependencyAnalyzer(tasks_dir) 114 | analysis = analyzer.analyze_feature(feature_description) 115 | 116 | # Step 2: Find insertion options 117 | inserter = TaskInserter(tasks_dir) 118 | options = inserter.find_insertion_options(analysis) 119 | 120 | if not options: 121 | print("❌ No suitable insertion options found") 122 | sys.exit(1) 123 | 124 | # Step 3: Create insertion plan (use best option) 125 | plan = inserter.create_insertion_plan(analysis, options[0]) 126 | 127 | # Step 4: Apply changes atomically 128 | updater = JsonUpdater(tasks_dir) 129 | summary = updater.apply_enhancement(analysis, plan) 130 | 131 | # Return summary as dict 132 | summary_dict = { 133 | "files_updated": summary.files_updated, 134 | "backup_location": summary.backup_location, 135 | "new_task_ids": summary.new_task_ids, 136 | "milestones_affected": summary.milestones_affected, 137 | "total_tasks_before": summary.total_tasks_before, 138 | "total_tasks_after": summary.total_tasks_after 139 | } 140 | 141 | print("🎉 Enhancement complete!") 142 | print(f"📁 Files updated: {', '.join(summary.files_updated)}") 143 | print(f"📦 Backup location: {summary.backup_location}") 144 | print(f"🎯 Tasks added: {len(summary.new_task_ids)}") 145 | 146 | return summary_dict 147 | 148 | except Exception as e: 149 | print(f"❌ Enhancement failed: {e}") 150 | # Try to restore from backup if available 151 | if backup_dir: 152 | try: 153 | updater = JsonUpdater(tasks_dir) 154 | if updater.restore_from_backup(backup_dir): 155 | print(f"✅ Restored from backup: {backup_dir}") 156 | except: 157 | print("❌ Could not restore from backup") 158 | sys.exit(1) 159 | 160 | 161 | def get_project_state(tasks_dir: str) -> dict: 162 | """Get current project state for context""" 163 | try: 164 | # Load task graph and progress tracker 165 | task_graph_path = os.path.join(tasks_dir, "task_graph.json") 166 | progress_path = os.path.join(tasks_dir, "progress_tracker.json") 167 | deferred_path = os.path.join(tasks_dir, "deferred.json") 168 | 169 | state = {} 170 | 171 | if os.path.exists(progress_path): 172 | with open(progress_path, 'r') as f: 173 | progress = json.load(f) 174 | current_milestone = progress.get('current_milestone', {}) 175 | state['current_milestone'] = { 176 | 'id': current_milestone.get('id', 'Unknown'), 177 | 'name': current_milestone.get('name', 'Unknown'), 178 | 'tasks_completed': current_milestone.get('tasks_completed', 0), 179 | 'tasks_total': current_milestone.get('tasks_total', 0), 180 | 'remaining_capacity': max(0, 5 - current_milestone.get('tasks_total', 0)) # Assuming max 5 tasks per milestone 181 | } 182 | 183 | if os.path.exists(deferred_path): 184 | with open(deferred_path, 'r') as f: 185 | deferred = json.load(f) 186 | state['deferred_features'] = deferred.get('deferred_features', []) 187 | 188 | if os.path.exists(task_graph_path): 189 | with open(task_graph_path, 'r') as f: 190 | task_graph = json.load(f) 191 | state['total_tasks'] = len(task_graph.get('tasks', [])) 192 | state['total_milestones'] = len(task_graph.get('milestones', [])) 193 | 194 | return state 195 | 196 | except Exception as e: 197 | print(f"❌ Could not get project state: {e}") 198 | sys.exit(1) 199 | 200 | 201 | def main(): 202 | parser = 
argparse.ArgumentParser(description='Gustav Enhancement CLI') 203 | parser.add_argument('action', choices=[ 204 | 'create-backup', 205 | 'get-backup-path', 206 | 'analyze-feature', 207 | 'show-impact', 208 | 'apply-enhancement', 209 | 'get-project-state' 210 | ]) 211 | parser.add_argument('feature_description', nargs='?', help='Feature description') 212 | parser.add_argument('tasks_dir', nargs='?', help='Path to .tasks directory') 213 | parser.add_argument('--backup-dir', help='Backup directory for restore') 214 | 215 | args = parser.parse_args() 216 | 217 | # Find tasks directory if not provided 218 | if not args.tasks_dir: 219 | try: 220 | project_root = find_project_root() 221 | args.tasks_dir = os.path.join(project_root, '.tasks') 222 | except ValueError as e: 223 | print(f"❌ {e}") 224 | sys.exit(1) 225 | 226 | if args.action == 'create-backup': 227 | backup_path = create_backup(args.tasks_dir) 228 | print(backup_path) 229 | 230 | elif args.action == 'get-backup-path': 231 | backup_path = get_backup_path(args.tasks_dir) 232 | print(backup_path) 233 | 234 | elif args.action == 'analyze-feature': 235 | if not args.feature_description: 236 | print("❌ Feature description required for analysis") 237 | sys.exit(1) 238 | analysis = analyze_feature(args.feature_description, args.tasks_dir) 239 | print(json.dumps(analysis, indent=2)) 240 | 241 | elif args.action == 'show-impact': 242 | if not args.feature_description: 243 | print("❌ Feature description required for impact preview") 244 | sys.exit(1) 245 | show_impact_preview(args.feature_description, args.tasks_dir) 246 | 247 | elif args.action == 'apply-enhancement': 248 | if not args.feature_description: 249 | print("❌ Feature description required for enhancement") 250 | sys.exit(1) 251 | summary = apply_enhancement(args.feature_description, args.tasks_dir, args.backup_dir) 252 | print(json.dumps(summary, indent=2)) 253 | 254 | elif args.action == 'get-project-state': 255 | state = get_project_state(args.tasks_dir) 256 | print(json.dumps(state, indent=2)) 257 | 258 | 259 | if __name__ == "__main__": 260 | main() -------------------------------------------------------------------------------- /.claude/commands/gustav/enhance.md: -------------------------------------------------------------------------------- 1 | --- 2 | allowed-tools: 3 | - Bash 4 | - Read 5 | - Edit 6 | - Write 7 | - Grep 8 | - Glob 9 | description: "Usage: /gustav:enhance [feature-description] - Add new feature to existing sprint plan" 10 | --- 11 | 12 | Intelligently add a new feature to an existing Gustav sprint plan: $ARGUMENTS 13 | 14 | You are Gustav Feature Enhancer — a smart post-planning assistant that researches new features and inserts them logically into existing sprint structures while maintaining milestone integrity and dependency flows. 
15 | 16 | ## Core Capabilities 17 | 18 | - **Smart Research**: Analyzes new features against existing techstack and architecture 19 | - **Intelligent Placement**: Finds optimal insertion points without breaking workflows 20 | - **Atomic Updates**: Updates all `.tasks/*.json` files consistently 21 | - **Dependency Aware**: Respects existing task dependencies and milestone boundaries 22 | - **Protection Maintained**: Preserves all guardrails and scope enforcement mechanisms 23 | 24 | ## Runtime Variables 25 | 26 | - `{feature_description}` = user-provided feature description 27 | - `{existing_milestones}` = current milestone structure from task_graph.json 28 | - `{current_techstack}` = existing technology decisions from techstack_research.json 29 | - `{TODAY}` = current date for research timestamps 30 | 31 | ## Prerequisites Validation 32 | 33 | Before starting, verify sprint plan exists and utilities are available: 34 | 35 | ```bash 36 | # Find project root and check required files exist 37 | PROJECT_ROOT=$(pwd) 38 | while [[ "$PROJECT_ROOT" != "/" ]] && [[ ! -d "$PROJECT_ROOT/.tasks" ]] && [[ ! -d "$PROJECT_ROOT/.git" ]]; do 39 | PROJECT_ROOT=$(dirname "$PROJECT_ROOT") 40 | done 41 | 42 | if [[ ! -f "$PROJECT_ROOT/.tasks/task_graph.json" ]]; then 43 | echo "❌ No existing sprint plan found. Run /gustav:planner first." 44 | exit 1 45 | fi 46 | 47 | # Find Gustav command directory for utility scripts 48 | GUSTAV_DIR="" 49 | if [[ -d "$PROJECT_ROOT/.claude/commands/gustav" ]]; then 50 | GUSTAV_DIR="$PROJECT_ROOT/.claude/commands/gustav" 51 | elif [[ -d ~/.claude/commands/gustav ]]; then 52 | GUSTAV_DIR=~/.claude/commands/gustav 53 | else 54 | echo "❌ Gustav command utilities not found. Check .claude/commands/gustav installation." 55 | exit 1 56 | fi 57 | 58 | # Verify utility scripts exist 59 | if [[ ! -f "$GUSTAV_DIR/utils/json_updater.py" ]] || [[ ! -f "$GUSTAV_DIR/utils/dependency_analyzer.py" ]]; then 60 | echo "❌ Missing Gustav utility scripts. Run /gustav:planner to initialize." 61 | exit 1 62 | fi 63 | 64 | # Check if executor is currently running 65 | STATUS=$(jq -r '.status' "$PROJECT_ROOT/.tasks/progress_tracker.json" 2>/dev/null || echo "planned") 66 | if [[ "$STATUS" == "executing" ]]; then 67 | echo "⚠️ Sprint execution in progress. Use with caution." 68 | echo "Consider running after current milestone validation." 69 | fi 70 | ``` 71 | 72 | ## Enhancement Workflow 73 | 74 | ### Phase 1 — Feature Analysis & Research 75 | 76 | 1. **Parse Feature Description** 77 | - Extract core functionality requirements 78 | - Identify new technologies/frameworks needed 79 | - Determine scope and complexity level 80 | - Cross-reference with existing deferred features 81 | 82 | 2. **Existing Context Analysis** 83 | 84 | ```bash 85 | # Load current project context using Gustav CLI wrapper 86 | echo "📋 Loading current sprint context..." 87 | cd "$GUSTAV_DIR" 88 | 89 | # Get comprehensive project state 90 | PROJECT_STATE=$(python3 utils/enhance_cli.py get-project-state "$PROJECT_ROOT/.tasks") 91 | CURRENT_MILESTONE=$(echo "$PROJECT_STATE" | jq -r '.current_milestone.id') 92 | MILESTONE_CAPACITY=$(echo "$PROJECT_STATE" | jq -r '.current_milestone.remaining_capacity') 93 | DEFERRED_COUNT=$(echo "$PROJECT_STATE" | jq -r '.deferred_features | length') 94 | 95 | echo "Current milestone: $CURRENT_MILESTONE (capacity: $MILESTONE_CAPACITY)" 96 | echo "Deferred features: $DEFERRED_COUNT" 97 | ``` 98 | 99 | 3. 
**Research Requirements** 100 | - Check if feature exists in deferred.json (reactivation scenario) 101 | - Identify if new technologies are needed 102 | - Research compatibility with existing stack 103 | - Determine if existing research covers needed components 104 | 105 | 4. **Targeted Research (if needed)** 106 | Launch research agents only for truly new components: 107 | 108 | ``` 109 | SA-ENHANCE-TECH — New technology assessment 110 | SA-ENHANCE-COMPAT — Compatibility analysis 111 | SA-ENHANCE-ARCH — Architecture impact analysis 112 | ``` 113 | 114 | ### Phase 2 — Dependency Analysis & Placement 115 | 116 | 1. **Dependency Mapping** 117 | - Identify what existing tasks/features this depends on 118 | - Determine what future features might depend on this 119 | - Check for circular dependencies 120 | - Assess integration complexity 121 | 122 | 2. **Milestone Analysis** 123 | 124 | ```bash 125 | # Current milestone capacity 126 | CURRENT_TASKS=$(jq '.milestones[] | select(.id=="'$CURRENT_MILESTONE'") | .tasks | length' "$PROJECT_ROOT/.tasks/task_graph.json") 127 | MAX_TASKS=$(jq -r '.milestone_strategy.max_tasks_per_milestone' "$PROJECT_ROOT/.tasks/task_graph.json") 128 | CAPACITY=$((MAX_TASKS - CURRENT_TASKS)) 129 | ``` 130 | 131 | 3. **Smart Placement Logic** 132 | 133 | **Option A: Current Milestone Insertion** 134 | - If capacity available AND no dependencies on future milestones 135 | - Insert before validation task 136 | - Update milestone task count 137 | 138 | **Option B: Future Milestone Insertion** 139 | - Find earliest milestone where all dependencies are satisfied 140 | - Check capacity; split milestone if needed 141 | - Maintain validation task positions 142 | 143 | **Option C: New Milestone Creation** 144 | - If feature is complex enough (3+ tasks) 145 | - If doesn't fit cleanly in existing structure 146 | - Create between appropriate milestones 147 | 148 | ### Phase 3 — Task Generation 149 | 150 | Follow same task structure as planner: 151 | 152 | ```json 153 | { 154 | "id": "T-ENH--", 155 | "title": "Verb + Object (<=80 chars)", 156 | "prd_traceability": { 157 | "feature_id": "F-ENH-", 158 | "prd_lines": ["ENHANCEMENT"], 159 | "original_requirement": "" 160 | }, 161 | "scope_boundaries": { 162 | "must_implement": [""], 163 | "must_not_implement": [""], 164 | "out_of_scope_check": "BLOCK if not in must_implement" 165 | }, 166 | "documentation_context": { 167 | "primary_docs": [{"url": "", "version": "", "last_verified": "YYYY-MM-DD"}], 168 | "version_locks": {"": ""}, 169 | "forbidden_patterns": [""] 170 | }, 171 | "hallucination_guards": { 172 | "verify_before_use": ["method signatures", "config options"], 173 | "forbidden_assumptions": ["no defaults assumed", "no guessed configs"] 174 | }, 175 | "context_drift_prevention": { 176 | "task_boundaries": "This task ONLY handles ", 177 | "refer_to_other_tasks": {"": "T-"}, 178 | "max_file_changes": 3, 179 | "if_exceeds": "STOP and verify scope" 180 | }, 181 | "milestone_metadata": { 182 | "milestone_id": "", 183 | "milestone_name": "", 184 | "is_milestone_critical": false, 185 | "can_defer": true, 186 | "milestone_position": "" 187 | }, 188 | "enhancement_metadata": { 189 | "enhancement_id": "ENH-", 190 | "added_date": "YYYY-MM-DD", 191 | "insertion_reason": "", 192 | "impact_assessment": "low|medium|high" 193 | } 194 | } 195 | ``` 196 | 197 | ### Phase 4 — Atomic JSON Updates 198 | 199 | **Use Gustav CLI wrapper for safe, atomic updates:** 200 | 201 | ```bash 202 | # Phase 4A: Create comprehensive backup 203 | echo "📦 Creating 
backup before enhancement..." 204 | cd "$GUSTAV_DIR" 205 | BACKUP_DIR=$(python3 utils/enhance_cli.py create-backup "$PROJECT_ROOT/.tasks") 206 | 207 | if [[ $? -ne 0 ]]; then 208 | echo "❌ Backup creation failed. Aborting enhancement." 209 | exit 1 210 | fi 211 | 212 | # Phase 4B: Run feature analysis 213 | echo "🔍 Analyzing feature dependencies..." 214 | FEATURE_ANALYSIS=$(python3 utils/enhance_cli.py analyze-feature "${feature_description}" "$PROJECT_ROOT/.tasks") 215 | if [[ $? -ne 0 ]]; then 216 | echo "❌ Feature analysis failed. Aborting enhancement." 217 | exit 1 218 | fi 219 | 220 | echo "Feature analysis completed:" 221 | echo "$FEATURE_ANALYSIS" | jq '.complexity, .estimated_tasks, .new_technologies' 222 | 223 | # Phase 4C: Show impact preview before applying 224 | echo "📊 Enhancement Impact Preview:" 225 | python3 utils/enhance_cli.py show-impact "${feature_description}" "$PROJECT_ROOT/.tasks" 226 | 227 | if [[ $? -ne 0 ]]; then 228 | echo "❌ Could not generate impact preview. Feature may be too complex." 229 | exit 1 230 | fi 231 | 232 | # Phase 4D: Apply enhancement atomically 233 | echo "" 234 | read -p "Continue with enhancement? (y/N): " -n 1 -r 235 | echo "" 236 | if [[ ! $REPLY =~ ^[Yy]$ ]]; then 237 | echo "Enhancement cancelled by user." 238 | exit 0 239 | fi 240 | 241 | echo "🚀 Applying enhancement (atomic with automatic rollback on failure)..." 242 | ENHANCEMENT_RESULT=$(python3 utils/enhance_cli.py apply-enhancement \ 243 | "${feature_description}" \ 244 | "$PROJECT_ROOT/.tasks" \ 245 | --backup-dir "$BACKUP_DIR") 246 | 247 | if [[ $? -ne 0 ]]; then 248 | echo "❌ Enhancement failed. Files automatically restored from backup." 249 | exit 1 250 | fi 251 | 252 | echo "$ENHANCEMENT_RESULT" 253 | 254 | # Enhancement is now complete! 255 | # All JSON files have been updated atomically by the Gustav utilities. 256 | # No manual file editing is needed or should be attempted. 257 | ``` 258 | 259 | **Files are automatically updated by enhance_cli.py:** 260 | - ✅ **task_graph.json** - New tasks, milestone updates, scope tracking (AUTOMATIC) 261 | - ✅ **progress_tracker.json** - Task counts, milestone progress, enhancement log (AUTOMATIC) 262 | - ✅ **techstack_research.json** - New technology placeholders if needed (AUTOMATIC) 263 | - ✅ **guardrail_config.json** - Protection rules for complex enhancements (AUTOMATIC) 264 | - ✅ **deferred.json** - Remove reactivated features, update dependencies (AUTOMATIC) 265 | - ✅ **prd_digest.json** - Enhancement tracking, protection metrics (AUTOMATIC) 266 | 267 | **⚠️ IMPORTANT: Do not manually edit JSON files after enhancement - all updates are handled automatically by the utility.** 268 | 269 | ## Enhancement Completion 270 | 271 | Once the enhancement script completes successfully: 272 | 273 | 1. **✅ All JSON files have been updated atomically** 274 | 2. **✅ Backup created automatically** 275 | 3. **✅ Task added to appropriate milestone** 276 | 4. **✅ Dependencies validated and satisfied** 277 | 5. 
**✅ Enhancement tracking recorded** 278 | 279 | **🎯 ENHANCEMENT IS COMPLETE - NO FURTHER ACTION NEEDED** 280 | 281 | Next step: Run `/gustav:executor` to begin development 282 | 283 | ## Safety Mechanisms 284 | 285 | ### Rollback Protection 286 | 287 | **Automatic backups handled by Gustav CLI wrapper:** 288 | 289 | ```bash 290 | # Backups are created automatically by enhance_cli.py 291 | # Manual restore if needed: 292 | echo "📦 Available backups:" 293 | ls -la "$PROJECT_ROOT/.tasks/backup/" 294 | 295 | # Note: Automatic rollback happens on failure 296 | # Manual restore not typically needed as enhance_cli.py handles it 297 | # But if required, backups are standard JSON files that can be copied back 298 | ``` 299 | 300 | ### Validation Gates 301 | 302 | **Automated validation by Gustav utilities:** 303 | - ✅ JSON syntax validation after each update 304 | - ✅ Task ID uniqueness verification 305 | - ✅ Dependency reference validation 306 | - ✅ Milestone capacity limits enforcement 307 | - ✅ Protection metrics consistency checks 308 | - ✅ Cross-file consistency validation 309 | - ✅ Automatic rollback on validation failure 310 | 311 | ### Impact Assessment 312 | 313 | Show user the impact before committing: 314 | 315 | ```yaml 316 | ENHANCEMENT_IMPACT: 317 | - Tasks Added: X 318 | - Milestones Affected: [list] 319 | - New Dependencies: [list] 320 | - Capacity Changes: [details] 321 | - Research Required: [technologies] 322 | ``` 323 | 324 | ## User Interaction Flow 325 | 326 | 1. **Feature Analysis** 327 | 328 | ``` 329 | 🔍 Analyzing: "{feature_description}" 330 | 📋 Loading current sprint context... 331 | Current milestone: M2 (capacity: 2) 332 | Deferred features: 3 333 | 🎯 Impact Assessment: [low/medium/high] 334 | ✅ Compatible with existing techstack 335 | ``` 336 | 337 | 2. **Placement Options** 338 | 339 | ``` 340 | 📍 Optimal Placement Found: 341 | Target: M2 "Core Features" (capacity: 2/5 tasks) 342 | Dependencies: All satisfied 343 | Estimated tasks: 2 344 | Complexity: Medium 345 | 346 | Alternative options: 347 | - M3 "Advanced Features" (requires T-CORE-003 completion) 348 | - New milestone M2.5 (if feature complexity increases) 349 | ``` 350 | 351 | 3. **Impact Preview** (Automatic via utilities) 352 | 353 | ``` 354 | 📊 Enhancement Impact Preview: 355 | ├─ Tasks to add: 2 356 | ├─ Target milestone: M2 357 | ├─ Files to update: 4 358 | ├─ Backup location: .tasks/backup/20250813_143022 359 | ├─ New dependencies: None 360 | └─ Risk level: Low 361 | ``` 362 | 363 | 4. **Execution** (Atomic via utilities) 364 | 365 | ``` 366 | 📦 Creating backup before enhancement... 367 | ✅ Backup created: .tasks/backup/20250813_143022 368 | 🔍 Analyzing feature dependencies... 369 | 📍 Finding optimal insertion point... 370 | 🚀 Applying enhancement (with automatic backups)... 371 | ✅ JSON consistency validation passed 372 | 373 | 🎉 Enhancement complete! 
374 | 📁 Files updated: task_graph.json, progress_tracker.json 375 | 🎯 Next: Run /gustav:executor to continue development 376 | ``` 377 | 378 | ## Example Usage 379 | 380 | ```bash 381 | # Add simple feature 382 | /gustav:enhance "Add keyboard shortcut to pause/resume recording" 383 | 384 | # Add complex feature 385 | /gustav:enhance "Add support for multiple Simplicate accounts with account switching" 386 | 387 | # Reactivate deferred feature 388 | /gustav:enhance "Add text-to-speech responses for confirmation" 389 | ``` 390 | 391 | ## Integration Points 392 | 393 | - **Executor**: Automatically picks up new tasks in execution order 394 | - **Validator**: Includes new tasks in milestone validation 395 | - **Velocity**: Tracks enhancement impact on velocity metrics 396 | - **Audit**: Logs all enhancement activities for security review 397 | 398 | ## Command Composition After Enhancement 399 | 400 | - `/gustav:executor` — Continue development with new tasks 401 | - `/gustav:validator` — Validate milestones including enhancements 402 | - `/gustav:velocity` — Updated burndown with enhancement impact 403 | - `/gustav:audit` — Security review including new features 404 | 405 | YAGNI principle still applies: only add features that provide clear value. Enhancement should feel natural and maintain the protection mechanisms that make Gustav planning robust. 406 | -------------------------------------------------------------------------------- /.claude/commands/gustav/utils/velocity_cli.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Gustav Velocity CLI - Fast data gathering for velocity analysis 4 | Speeds up data collection from JSON files for velocity reporting 5 | """ 6 | 7 | import json 8 | import argparse 9 | import sys 10 | from pathlib import Path 11 | from datetime import datetime, timedelta 12 | from typing import Dict, List, Any, Optional 13 | import statistics 14 | import re 15 | 16 | 17 | class VelocityAnalyzer: 18 | def __init__(self, tasks_dir: Path = None): 19 | self.tasks_dir = tasks_dir or Path(".tasks") 20 | self.data = {} 21 | self.metrics = {} 22 | 23 | def load_json_file(self, filename: str) -> Dict[str, Any]: 24 | """Load a JSON file safely""" 25 | file_path = self.tasks_dir / filename 26 | try: 27 | if file_path.exists(): 28 | with open(file_path, 'r') as f: 29 | return json.load(f) 30 | return {} 31 | except (json.JSONDecodeError, IOError) as e: 32 | print(f"Warning: Could not load {filename}: {e}", file=sys.stderr) 33 | return {} 34 | 35 | def load_all_data(self): 36 | """Load all relevant JSON files in parallel concept""" 37 | files_to_load = [ 38 | 'progress_tracker.json', 39 | 'task_graph.json', 40 | 'techstack_research.json', 41 | 'deferred.json', 42 | 'guardrail_config.json', 43 | 'prd_digest.json' 44 | ] 45 | 46 | for filename in files_to_load: 47 | key = filename.replace('.json', '') 48 | self.data[key] = self.load_json_file(filename) 49 | 50 | print(f"✅ Loaded {len(self.data)} data files") 51 | 52 | def calculate_sprint_metrics(self) -> Dict[str, Any]: 53 | """Calculate current sprint metrics""" 54 | progress = self.data.get('progress_tracker', {}) 55 | task_graph = self.data.get('task_graph', {}) 56 | 57 | # Basic metrics 58 | metrics = { 59 | 'sprint_id': progress.get('sprint_id', 'UNKNOWN'), 60 | 'status': progress.get('status', 'unknown'), 61 | 'created_date': progress.get('created_date'), 62 | 'total_tasks': progress.get('total_tasks', 0), 63 | 'total_features': 
progress.get('total_features', 0), 64 | 'total_milestones': progress.get('total_milestones', 0), 65 | 'tasks_completed': progress.get('tasks_completed', 0), 66 | 'features_completed': progress.get('features_completed', 0), 67 | 'milestones_completed': progress.get('milestones_completed', 0) 68 | } 69 | 70 | # Calculate percentages 71 | if metrics['total_tasks'] > 0: 72 | metrics['task_completion_rate'] = metrics['tasks_completed'] / metrics['total_tasks'] 73 | else: 74 | metrics['task_completion_rate'] = 0.0 75 | 76 | if metrics['total_features'] > 0: 77 | metrics['feature_completion_rate'] = metrics['features_completed'] / metrics['total_features'] 78 | else: 79 | metrics['feature_completion_rate'] = 0.0 80 | 81 | if metrics['total_milestones'] > 0: 82 | metrics['milestone_completion_rate'] = metrics['milestones_completed'] / metrics['total_milestones'] 83 | else: 84 | metrics['milestone_completion_rate'] = 0.0 85 | 86 | # Current milestone info 87 | current_milestone = progress.get('current_milestone', {}) 88 | if current_milestone: 89 | metrics['current_milestone'] = { 90 | 'id': current_milestone.get('id'), 91 | 'name': current_milestone.get('name'), 92 | 'progress': current_milestone.get('progress', 0), 93 | 'tasks_completed': current_milestone.get('tasks_completed', 0), 94 | 'tasks_total': current_milestone.get('tasks_total', 0) 95 | } 96 | if current_milestone.get('tasks_total', 0) > 0: 97 | metrics['current_milestone']['completion_rate'] = ( 98 | current_milestone.get('tasks_completed', 0) / current_milestone.get('tasks_total', 1) 99 | ) 100 | 101 | # Calculate days since start 102 | if metrics['created_date']: 103 | try: 104 | created = datetime.fromisoformat(metrics['created_date']) 105 | now = datetime.now() 106 | metrics['days_elapsed'] = (now - created).days 107 | 108 | # Calculate velocity (tasks per day) 109 | if metrics['days_elapsed'] > 0: 110 | metrics['velocity_tasks_per_day'] = metrics['tasks_completed'] / metrics['days_elapsed'] 111 | else: 112 | metrics['velocity_tasks_per_day'] = 0.0 113 | except ValueError: 114 | metrics['days_elapsed'] = 0 115 | metrics['velocity_tasks_per_day'] = 0.0 116 | 117 | # Task complexity analysis 118 | tasks = task_graph.get('tasks', []) 119 | if tasks: 120 | # Count tasks by milestone 121 | milestone_tasks = {} 122 | blocked_tasks = 0 123 | high_complexity_tasks = 0 124 | 125 | for task in tasks: 126 | # Count by milestone 127 | milestone = task.get('milestone', 'UNKNOWN') 128 | milestone_tasks[milestone] = milestone_tasks.get(milestone, 0) + 1 129 | 130 | # Check for blockers 131 | if task.get('blocked', False) or task.get('dependencies'): 132 | blocked_tasks += 1 133 | 134 | # Estimate complexity from description length and requirements 135 | scope = task.get('scope_boundaries', {}) 136 | must_implement = scope.get('must_implement', []) 137 | if len(must_implement) > 5: # High complexity heuristic 138 | high_complexity_tasks += 1 139 | 140 | metrics['milestone_task_distribution'] = milestone_tasks 141 | metrics['blocked_tasks'] = blocked_tasks 142 | metrics['high_complexity_tasks'] = high_complexity_tasks 143 | metrics['risk_factor'] = blocked_tasks / len(tasks) if tasks else 0.0 144 | 145 | return metrics 146 | 147 | def calculate_deferred_impact(self) -> Dict[str, Any]: 148 | """Analyze deferred features impact""" 149 | deferred = self.data.get('deferred', {}) 150 | deferred_features = deferred.get('deferred_features', []) 151 | 152 | impact = { 153 | 'total_deferred': len(deferred_features), 154 | 'scope_reduction': 0.0, 155 | 
'complexity_saved': 0, 156 | 'categories': {} 157 | } 158 | 159 | if deferred_features: 160 | for feature in deferred_features: 161 | reason = feature.get('reason', 'other') 162 | impact['categories'][reason] = impact['categories'].get(reason, 0) + 1 163 | 164 | # Estimate complexity saved (simple heuristic) 165 | if 'complex' in feature.get('title', '').lower(): 166 | impact['complexity_saved'] += 2 167 | else: 168 | impact['complexity_saved'] += 1 169 | 170 | return impact 171 | 172 | def generate_burndown_data(self) -> List[Dict[str, Any]]: 173 | """Generate burndown chart data""" 174 | progress = self.data.get('progress_tracker', {}) 175 | total_tasks = progress.get('total_tasks', 0) 176 | tasks_completed = progress.get('tasks_completed', 0) 177 | days_elapsed = self.metrics.get('days_elapsed', 0) 178 | 179 | burndown = [] 180 | 181 | # Simple linear ideal burndown 182 | if total_tasks > 0 and days_elapsed > 0: 183 | estimated_days = max(days_elapsed + 7, 14) # Assume at least 2 weeks 184 | 185 | for day in range(estimated_days + 1): 186 | ideal_remaining = total_tasks * (1 - day / estimated_days) 187 | 188 | # Actual progress (simplified - only current state) 189 | if day <= days_elapsed: 190 | actual_remaining = total_tasks - (tasks_completed * day / max(days_elapsed, 1)) 191 | else: 192 | # Project forward 193 | velocity = self.metrics.get('velocity_tasks_per_day', 0) 194 | projected_completed = tasks_completed + velocity * (day - days_elapsed) 195 | actual_remaining = max(0, total_tasks - projected_completed) 196 | 197 | burndown.append({ 198 | 'day': day, 199 | 'ideal_remaining': max(0, ideal_remaining), 200 | 'actual_remaining': max(0, actual_remaining) 201 | }) 202 | 203 | return burndown 204 | 205 | def generate_sparkline(self, values: List[float]) -> str: 206 | """Generate ASCII sparkline from values""" 207 | if not values: 208 | return "" 209 | 210 | chars = "▁▂▃▄▅▆▇█" 211 | if len(values) == 1: 212 | return chars[4] # Middle char for single value 213 | 214 | min_val, max_val = min(values), max(values) 215 | range_val = max_val - min_val 216 | 217 | if range_val == 0: 218 | return chars[4] * len(values) 219 | 220 | result = "" 221 | for val in values: 222 | normalized = (val - min_val) / range_val 223 | char_idx = min(len(chars) - 1, int(normalized * (len(chars) - 1))) 224 | result += chars[char_idx] 225 | 226 | return result 227 | 228 | def analyze(self, sprint_id: Optional[str] = None): 229 | """Main analysis function""" 230 | print("🔍 Loading Gustav project data...") 231 | self.load_all_data() 232 | 233 | print("📊 Calculating velocity metrics...") 234 | self.metrics = self.calculate_sprint_metrics() 235 | 236 | print("📈 Analyzing deferred impact...") 237 | deferred_impact = self.calculate_deferred_impact() 238 | 239 | print("🔥 Generating burndown projections...") 240 | burndown_data = self.generate_burndown_data() 241 | 242 | return { 243 | 'metrics': self.metrics, 244 | 'deferred_impact': deferred_impact, 245 | 'burndown_data': burndown_data, 246 | 'timestamp': datetime.now().isoformat(), 247 | 'data_sources': list(self.data.keys()) 248 | } 249 | 250 | def format_report(self, analysis: Dict[str, Any]) -> str: 251 | """Format analysis into readable report""" 252 | metrics = analysis['metrics'] 253 | deferred = analysis['deferred_impact'] 254 | burndown = analysis['burndown_data'] 255 | 256 | report = f""" 257 | # Gustav Velocity Analysis Report 258 | 259 | ## Sprint Overview 260 | - **Sprint:** {metrics['sprint_id']} 261 | - **Status:** {metrics['status']} 262 | - 
**Created:** {metrics['created_date']} 263 | - **Days Elapsed:** {metrics['days_elapsed']} 264 | 265 | ## 📈 Core Metrics 266 | | Metric | Current | Total | Completion | 267 | |--------|---------|-------|------------| 268 | | Tasks | {metrics['tasks_completed']} | {metrics['total_tasks']} | {metrics['task_completion_rate']:.1%} | 269 | | Features | {metrics['features_completed']} | {metrics['total_features']} | {metrics['feature_completion_rate']:.1%} | 270 | | Milestones | {metrics['milestones_completed']} | {metrics['total_milestones']} | {metrics['milestone_completion_rate']:.1%} | 271 | 272 | ## 🎯 Current Milestone 273 | """ 274 | 275 | if metrics.get('current_milestone'): 276 | cm = metrics['current_milestone'] 277 | report += f"""- **{cm['id']}:** {cm['name']} 278 | - **Progress:** {cm['tasks_completed']}/{cm['tasks_total']} tasks ({cm.get('completion_rate', 0):.1%}) 279 | """ 280 | 281 | report += f""" 282 | ## ⚡ Velocity Metrics 283 | - **Task Velocity:** {metrics['velocity_tasks_per_day']:.2f} tasks/day 284 | - **Risk Factor:** {metrics['risk_factor']:.1%} (blocked tasks) 285 | - **Blocked Tasks:** {metrics['blocked_tasks']} 286 | - **High Complexity:** {metrics['high_complexity_tasks']} 287 | 288 | ## 📉 Scope Management 289 | - **Deferred Features:** {deferred['total_deferred']} 290 | - **Complexity Saved:** {deferred['complexity_saved']} points 291 | """ 292 | 293 | if deferred['categories']: 294 | report += "\n### Deferral Reasons:\n" 295 | for reason, count in deferred['categories'].items(): 296 | report += f"- {reason}: {count}\n" 297 | 298 | # Burndown sparkline 299 | if burndown: 300 | ideal_values = [d['ideal_remaining'] for d in burndown[:14]] # First 2 weeks 301 | actual_values = [d['actual_remaining'] for d in burndown[:14]] 302 | 303 | ideal_spark = self.generate_sparkline(ideal_values) 304 | actual_spark = self.generate_sparkline(actual_values) 305 | 306 | report += f""" 307 | ## 📊 Burndown Trend (14 days) 308 | - **Ideal:** {ideal_spark} 309 | - **Actual:** {actual_spark} 310 | 311 | Legend: ▁▂▃▄▅▆▇█ (low → high remaining work) 312 | """ 313 | 314 | report += f""" 315 | ## 🔍 Data Sources 316 | Analyzed: {', '.join(analysis['data_sources'])} 317 | Generated: {analysis['timestamp']} 318 | 319 | --- 320 | *Analysis generated by Gustav Velocity CLI* 321 | """ 322 | 323 | return report.strip() 324 | 325 | 326 | def main(): 327 | parser = argparse.ArgumentParser(description='Gustav Velocity CLI - Fast velocity analysis') 328 | parser.add_argument('sprint_id', nargs='?', help='Sprint ID to analyze (optional)') 329 | parser.add_argument('--json', action='store_true', help='Output as JSON') 330 | parser.add_argument('--tasks-dir', type=Path, default=Path('.tasks'), 331 | help='Path to tasks directory') 332 | parser.add_argument('--quiet', '-q', action='store_true', help='Suppress progress messages') 333 | 334 | args = parser.parse_args() 335 | 336 | if args.quiet: 337 | # Redirect prints to stderr or suppress 338 | class QuietPrint: 339 | def write(self, text): pass 340 | def flush(self): pass 341 | sys.stdout = QuietPrint() 342 | 343 | analyzer = VelocityAnalyzer(args.tasks_dir) 344 | 345 | try: 346 | analysis = analyzer.analyze(args.sprint_id) 347 | 348 | if args.quiet: 349 | sys.stdout = sys.__stdout__ # Restore stdout for final output 350 | 351 | if args.json: 352 | print(json.dumps(analysis, indent=2)) 353 | else: 354 | print(analyzer.format_report(analysis)) 355 | 356 | except Exception as e: 357 | print(f"❌ Analysis failed: {e}", file=sys.stderr) 358 | sys.exit(1) 359 | 
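# Usage sketch (illustrative; run from a project root containing .tasks/):
#   python3 velocity_cli.py                      # formatted Markdown report
#   python3 velocity_cli.py --json --quiet | jq '.metrics.velocity_tasks_per_day'
#   python3 velocity_cli.py --tasks-dir path/to/.tasks
# Pair --json with --quiet when piping, so progress messages do not mix into the
# JSON on stdout. The sprint_id positional argument is accepted but optional.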
360 | 361 | if __name__ == '__main__': 362 | main() -------------------------------------------------------------------------------- /.claude/commands/gustav/planner.md: -------------------------------------------------------------------------------- 1 | --- 2 | allowed-tools: 3 | - Bash 4 | - Read 5 | - Write 6 | - WebFetch 7 | - Grep 8 | - Glob 9 | - LS 10 | - WebSearch 11 | - Task 12 | description: "Usage: /gustav:planner [PRD file or requirements] - Plan and architect sprint from PRD" 13 | --- 14 | 15 | **WHEN STARTED OUTPUT THE FOLLOWING CODE BLOCK EXACTLY AS IS - NO CUSTOM TEXT FROM YOU** 16 | 17 | ``` 18 | ● 19 | ██████ ██ ██ ███████ ████████ █████ ██ ██ 20 | ██ ██ ██ ██ ██ ██ ██ ██ ██ 21 | ██ ███ ██ ██ ███████ ██ ███████ ██ ██ 22 | ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ 23 | ██████ ██████ ███████ ██ ██ ██ ████ 24 | 25 | A sprint orchestrator 26 | --------------------- 27 | 28 | ``` 29 | 30 | **NOW CONTINUE AS NORMAL** 31 | 32 | Plan and architect a complete sprint from the provided PRD or requirements: $ARGUMENTS 33 | 34 | You are MVP Sprint Architect — a research‑driven, YAGNI‑focused planner who turns PRDs into atomic, guardrail‑enforced tasks optimized for AI coding agents. 35 | 36 | ## Core Guardrails (enforced) 37 | 38 | - Anti‑feature‑creep: 39 | - If not in PRD and not required for MVP, exclude 40 | - Every feature traces to PRD line numbers 41 | - Max 7 MVP features; all others → `.tasks/deferred.json` with reason 42 | - Anti‑hallucination: 43 | - Every technical decision has 2+ verifiable sources with URLs 44 | - Versions from official docs; no API behavior assumptions 45 | - Uncertain → mark `NEEDS_VERIFICATION`; include source URLs in task context 46 | - Documentation currency: 47 | - Prefer docs from last 6 months; record publish dates 48 | - Flag older as `VERIFY_CURRENT` 49 | - Include official doc URLs in context 50 | 51 | ## Runtime variables 52 | 53 | - `{project_type}` ∈ {web_application, mobile_application, cli_tool, game, data_pipeline} 54 | - `{detected_language}` primary language inferred from PRD 55 | - `{detected_keywords}` key technical terms from PRD 56 | - `{TODAY}` = Month YYYY; use ISO dates in JSON (YYYY‑MM‑DD) 57 | 58 | ## Metrics to track (throughout) 59 | 60 | - Features analyzed; MVP selected vs deferred 61 | - Parallel sub‑agents spawned; sources verified 62 | - Tasks per milestone; total milestones; protection metrics 63 | 64 | ## Workflow 65 | 66 | 1) Phase 1 — PRD Analysis (traceable) 67 | 2) Phase 2 — Tech Research (parallel sub‑agents) 68 | 3) Phase 3 — Atomic Task Creation (milestones) 69 | 4) Phase 4 — File Generation (JSON outputs + metrics) 70 | 71 | --- 72 | 73 | ### Phase 1 — PRD Analysis 74 | 75 | Feature Extraction Protocol 76 | 77 | 1. Read PRD line‑by‑line 78 | 2. Extract features with line references 79 | 3. For each feature record: 80 | - `PRD_line_numbers`, `Original_text`, `MVP_justification` 81 | 4. 
Create `.tasks/deferred.json` for everything not in top 7 MVP features: 82 | 83 | ```json 84 | { 85 | "feature": "", 86 | "prd_mention": "lines -", 87 | "deferral_reason": "", 88 | "sprint_target": "Sprint " 89 | } 90 | ``` 91 | 92 | MVP Feature Limit: 7 93 | 94 | --- 95 | 96 | ### Phase 2 — Tech Research (mandatory parallel) 97 | 98 | - Launch 3–8 sub‑agents concurrently in a single message (all tool calls in one block) 99 | - Use `/compact` between major steps to optimize context 100 | - For each sub‑agent, run queries with `{TODAY}` included and capture 2+ sources 101 | 102 | Base Research Agents (always launch) 103 | 104 | - SA‑1‑LANG — Programming language selection 105 | - "best {project_type} programming languages {TODAY}" 106 | - "{detected_keywords} language comparison {TODAY}" 107 | - SA‑2‑ARCH — Architecture patterns 108 | - "{project_type} architecture patterns {TODAY}" 109 | - "{project_type} best practices {TODAY}" 110 | - SA‑3‑TEST — Testing strategy 111 | - "{project_type} testing frameworks {TODAY}" 112 | - "testing best practices {detected_language} {TODAY}" 113 | 114 | Conditional Agents (by `{project_type}`) 115 | 116 | - web_application: 117 | - SA‑4‑FRONTEND — frontend framework: "best frontend frameworks {TODAY}", "modern UI libraries comparison {TODAY}" 118 | - SA‑5‑BACKEND — backend: "backend frameworks {detected_language} {TODAY}", "API development best practices {TODAY}" 119 | - SA‑6‑DATABASE — data layer: "database choices web applications {TODAY}", "SQL vs NoSQL decision guide {TODAY}" 120 | - SA‑7‑HOSTING — deploy: "web hosting platforms comparison {TODAY}", "cloud deployment options {TODAY}" 121 | - mobile_application: 122 | - SA‑4‑PLATFORM — framework: "mobile app development frameworks {TODAY}", "native vs cross‑platform comparison {TODAY}" 123 | - SA‑5‑STATE — state/persistence: "mobile app state management {TODAY}", "data persistence mobile apps {TODAY}" 124 | - SA‑6‑BACKEND — backend: "mobile backend services comparison {TODAY}", "BaaS platforms {TODAY}" 125 | - SA‑7‑STORE — distribution: "app store submission requirements {TODAY}", "mobile app deployment best practices {TODAY}" 126 | - cli_tool: 127 | - SA‑4‑FRAMEWORK — CLI framework: "best CLI frameworks {detected_language} {TODAY}", "argument parsing libraries {TODAY}" 128 | - SA‑5‑PACKAGE — packaging: "CLI tool distribution methods {TODAY}", "package managers command line tools {TODAY}" 129 | - SA‑6‑CONFIG — configuration: "CLI configuration best practices {TODAY}", "settings management command line apps {TODAY}" 130 | - game: 131 | - SA‑4‑ENGINE — engine: "best game engines {TODAY}", "game development frameworks comparison {TODAY}" 132 | - SA‑5‑GRAPHICS — graphics: "game graphics rendering techniques {TODAY}", "2D vs 3D game development {TODAY}" 133 | - SA‑6‑PHYSICS — physics: "game physics engines comparison {TODAY}", "physics simulation libraries {TODAY}" 134 | - SA‑7‑PLATFORM — targets: "game platform deployment options {TODAY}", "cross‑platform game development {TODAY}" 135 | - data_pipeline: 136 | - SA‑4‑PROCESSING — processing: "data processing frameworks comparison {TODAY}", "big data vs small data tools {TODAY}" 137 | - SA‑5‑STORAGE — storage: "data storage solutions comparison {TODAY}", "data lake vs warehouse architecture {TODAY}" 138 | - SA‑6‑ORCHESTR — orchestration: "data pipeline orchestration tools {TODAY}", "workflow automation platforms {TODAY}" 139 | - SA‑7‑MONITOR — monitoring: "data pipeline monitoring best practices {TODAY}", "observability tools data engineering {TODAY}" 140 | 
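The rosters above are prompts for parallel sub‑agents rather than code, but the selection rule they encode is mechanical. A minimal sketch of that rule (hypothetical helper, not one of the shipped Gustav utils; agent IDs mirror the lists in this section):

```python
# Hypothetical roster builder illustrating the base + conditional agent selection.
from datetime import datetime

BASE_AGENTS = ["SA-1-LANG", "SA-2-ARCH", "SA-3-TEST"]

CONDITIONAL_AGENTS = {
    "web_application":    ["SA-4-FRONTEND", "SA-5-BACKEND", "SA-6-DATABASE", "SA-7-HOSTING"],
    "mobile_application": ["SA-4-PLATFORM", "SA-5-STATE", "SA-6-BACKEND", "SA-7-STORE"],
    "cli_tool":           ["SA-4-FRAMEWORK", "SA-5-PACKAGE", "SA-6-CONFIG"],
    "game":               ["SA-4-ENGINE", "SA-5-GRAPHICS", "SA-6-PHYSICS", "SA-7-PLATFORM"],
    "data_pipeline":      ["SA-4-PROCESSING", "SA-5-STORAGE", "SA-6-ORCHESTR", "SA-7-MONITOR"],
}

def research_roster(project_type: str) -> list:
    """Agents to launch concurrently in a single message (3-8 total)."""
    return BASE_AGENTS + CONDITIONAL_AGENTS.get(project_type, [])

def today() -> str:
    """Value substituted for {TODAY} in search queries (Month YYYY)."""
    return datetime.now().strftime("%B %Y")

# Example: research_roster("cli_tool") yields 6 agents; each query is dated with today().
```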
141 | Sub‑Agent Return (use this structure) 142 | 143 | ```json 144 | { 145 | "agent_id": "SA-X", 146 | "recommendations": [""], 147 | "sources": ["", ""] , 148 | "warnings": [""] 149 | } 150 | ``` 151 | 152 | Aggregation 153 | 154 | 1. Wait for all agents (timeout ≤ 30s). Track completion and handle timeouts 155 | 2. Cross‑reference findings for consensus and conflicts 156 | 3. Resolve conflicts by scoring per expertise area; output final stack 157 | 158 | Expected Research Summary 159 | 160 | ```json 161 | { 162 | "research_duration": "", 163 | "agents_used": , 164 | "consensus_items": [""], 165 | "conflicts_resolved": , 166 | "final_stack": "" 167 | } 168 | ``` 169 | 170 | Record per technology 171 | 172 | ```json 173 | { 174 | "name": "", 175 | "version": "", 176 | "version_verified": { 177 | "source": "", 178 | "checked_date": "YYYY-MM-DD", 179 | "is_latest_stable": true 180 | }, 181 | "documentation": { 182 | "official_url": "", 183 | "last_updated": "YYYY-MM-DD" 184 | }, 185 | "decision_sources": [ 186 | { "url": "", "published": "YYYY-MM-DD", "relevance": "" } 187 | ], 188 | "needs_verification": false 189 | } 190 | ``` 191 | 192 | Date formats: ISO in JSON; Month YYYY in narratives/searches. 193 | 194 | --- 195 | 196 | ### Phase 3 — Atomic Tasks + Milestones 197 | 198 | Milestone Protocol 199 | 200 | - Size: 3–5 tasks each 201 | - Goal: Each milestone creates a launchable app state 202 | - Validation: Insert a validation task after each milestone 203 | 204 | Milestone pattern (example) 205 | 206 | - M1 Minimal Launchable Shell (3–4 tasks): setup, routing, landing; validation: app runs 207 | - M2 Core Feature Skeleton (4–5 tasks): DB + CRUD + simple UI + test; validation: end‑to‑end 208 | - M3 Enhanced Feature (3–4 tasks): business logic + UI polish + error handling; validation: prod‑ready 209 | 210 | Validation Task (insert after each milestone) 211 | 212 | ```json 213 | { 214 | "id": "T-VAL-", 215 | "title": "Validate Milestone : ", 216 | "type": "validation", 217 | "milestone_id": "M", 218 | "validation_steps": [ 219 | "Run application", 220 | "Execute smoke tests", 221 | "Verify milestone success criteria", 222 | "Generate status report", 223 | "PAUSE for human review" 224 | ], 225 | "success_criteria": { 226 | "app_launches": true, 227 | "no_console_errors": true, 228 | "core_features_work": [""], 229 | "ui_accessible": true 230 | }, 231 | "rollback_point": true 232 | } 233 | ``` 234 | 235 | Each task MUST include 236 | 237 | ```json 238 | { 239 | "id": "T--", 240 | "title": "Verb + Object (<=80 chars)", 241 | "prd_traceability": { 242 | "feature_id": "F", 243 | "prd_lines": [], 244 | "original_requirement": "" 245 | }, 246 | "scope_boundaries": { 247 | "must_implement": [""], 248 | "must_not_implement": [""], 249 | "out_of_scope_check": "BLOCK if not in must_implement" 250 | }, 251 | "documentation_context": { 252 | "primary_docs": [{ "url": "", "version": "", "last_verified": "YYYY-MM-DD" }], 253 | "version_locks": { "": "" }, 254 | "forbidden_patterns": [""] 255 | }, 256 | "hallucination_guards": { 257 | "verify_before_use": ["method signatures", "config options", "middleware presence"], 258 | "forbidden_assumptions": ["no defaults assumed", "no guessed configs", "no blog copy‑paste"] 259 | }, 260 | "context_drift_prevention": { 261 | "task_boundaries": "This task ONLY handles ", 262 | "refer_to_other_tasks": { "": "T-" }, 263 | "max_file_changes": 3, 264 | "if_exceeds": "STOP and verify scope" 265 | }, 266 | "milestone_metadata": { 267 | "milestone_id": "M", 268 | 
"milestone_name": "", 269 | "is_milestone_critical": true, 270 | "can_defer": false, 271 | "milestone_position": 272 | } 273 | } 274 | ``` 275 | 276 | --- 277 | 278 | ### Phase 4 — Output Files (all under `.tasks/`) 279 | 280 | 1) `prd_digest.json` 281 | 282 | Must include: `version`, `today_iso`, `prd_source{filename,hash,total_lines}`, `mvp_features[] {id,name,prd_lines,original_text,why_mvp}`, and `protection_metrics{features_deferred,scope_reduction,documentation_age{avg_days,oldest_days,needs_refresh[]}}`. 283 | 284 | 2) `deferred.json` 285 | 286 | Must include: `deferred_features[] {name,prd_reference,reason,estimated_sprint,dependencies[]}`, `total_deferred`, `estimated_additional_sprints`. 287 | 288 | 3) `techstack_research.json` 289 | 290 | Must include: `research_timestamp`, `research_methodology{type,agents_spawned,execution_time_seconds,searches_performed}`, `sub_agent_results{...}`, `verification_status{all_sources_verified,parallel_cross_referenced,conflicts_resolved}`, `stack{... with version_verification}`. 291 | 292 | 4) `task_graph.json` 293 | 294 | Must include: `tasks[]`, `milestones[] {id,name,description,tasks[],launch_ready,validation_criteria{...},human_review_required,rollback_point}`, `milestone_strategy{max_tasks_per_milestone,min_tasks_per_milestone,validation_frequency,human_review_points[],rollback_strategy}`, `scope_enforcement{max_tasks_per_feature,total_tasks,complexity_score,anti_creep_rules[]}`. 295 | 296 | 5) `guardrail_config.json` 297 | 298 | Must include: `protection_hooks{pre_task[],during_task[],post_task[]}`, `scope_creep_detection{max_files_per_task,max_lines_per_file,forbidden_keywords[],forbidden_imports["*-beta","*-alpha","*-rc"]}`. 299 | 300 | 6) `progress_tracker.json` 301 | 302 | Must include: `sprint_id,created_date,total_features,total_tasks,total_milestones,status,current_milestone{...},milestones_completed,features_completed,tasks_completed,last_human_review,next_checkpoint,launch_ready_states[],next_action`. 303 | 304 | --- 305 | 306 | ## Execution Steps 307 | 308 | 1. Read + hash PRD for traceability 309 | 2. Extract features with PRD line mapping (cap MVP at 7; defer rest) 310 | 3. PARALLEL research (3–8 agents in one message); aggregate and verify 311 | 4. Generate all `.tasks/*.json` files with protection metrics 312 | 5. Produce verification report with actual metrics 313 | 314 | Parallel research benefits: faster wall‑clock time, better coverage, reduced single‑agent bias, improved verification through cross‑checking. 
315 | 316 | --- 317 | 318 | ## Metrics Calculation (use actuals from execution) 319 | 320 | - Feature metrics: total features, MVP selected, deferred count and % 321 | - Research: agent count, sources verified, consensus % (avg of consensus scores) 322 | - Tasks: total tasks, total milestones, tasks per milestone, count of `T-VAL-*` 323 | - Scope: scope reduction %, max files per task, error‑detection window (tasks/milestone) 324 | 325 | --- 326 | 327 | ## Final Verification Checklist 328 | 329 | - [ ] Every feature traces to PRD lines 330 | - [ ] ≤7 MVP features; rest deferred with reasons 331 | - [ ] Tech versions verified from official sources; URLs included 332 | - [ ] Docs <6 months or flagged `VERIFY_CURRENT` 333 | - [ ] Each task has scope boundaries and hallucination guards 334 | - [ ] Max file change limits enforced per task 335 | - [ ] No beta/alpha/RC dependencies 336 | - [ ] Milestones contain 3–5 tasks and produce launchable states 337 | - [ ] Validation tasks inserted + human review points marked 338 | - [ ] Rollback strategy defined 339 | 340 | --- 341 | 342 | ## Final Report (replace all placeholders with ACTUALS) 343 | 344 | ```markdown 345 | ## Sprint Plan Created with Protection Mechanisms ✅ 346 | 347 | ### Scope Protection 348 | - MVP Features: [ACTUAL_MVP_COUNT] of [TOTAL_FEATURES_ANALYZED] ([ACTUAL_DEFERRED_PERCENTAGE]% deferred) 349 | - Deferred features documented in .tasks/deferred.json 350 | 351 | ### Parallel Research Execution 🚀 352 | - Sub-Agents Spawned: [ACTUAL_AGENT_COUNT] 353 | - Research Time: [ACTUAL_TIME] seconds 354 | - Sources Verified: [ACTUAL_SOURCE_COUNT] 355 | - Consensus Achieved: [ACTUAL_CONSENSUS_PERCENTAGE]% 356 | 357 | ### Documentation Verification 358 | - All sources <6 months old: [✅/❌] 359 | - Version numbers verified: [✅/❌] 360 | - Official docs linked: [✅/❌] 361 | 362 | ### Context Boundaries 363 | - Max [ACTUAL_MAX_FILES] files per task 364 | - Scope guards active; feature‑creep detection enabled 365 | 366 | ### Files Created (.tasks/) 367 | - prd_digest.json 368 | - deferred.json ([ACTUAL_DEFERRED_COUNT]) 369 | - techstack_research.json 370 | - task_graph.json ([ACTUAL_TASK_COUNT] tasks across [ACTUAL_MILESTONE_COUNT] milestones) 371 | - guardrail_config.json 372 | - progress_tracker.json 373 | 374 | ### Protection Metrics 375 | - Scope Reduction: [ACTUAL_SCOPE_REDUCTION]% 376 | - Documentation Currency: [ACTUAL_DOC_CURRENCY]% 377 | - Milestone Checkpoints: every [ACTUAL_TASKS_PER_MILESTONE] tasks 378 | - Human Review Frequency: [ACTUAL_REVIEW_COUNT] 379 | ``` 380 | 381 | --- 382 | 383 | ## Command Composition 384 | 385 | - `/gustav:executor` — Development 386 | - `/gustav:validator` — Validation 387 | - `/gustav:velocity` — Burndown chart 388 | - `/gustav:audit` — Security check 389 | - `/gustav:enhance` — Smart feature addition (post-planning) 390 | 391 | ## Session Management 392 | 393 | - Use `/compact` after major phases 394 | - Token budget ~50K for full planning; expected duration 5–10 minutes 395 | 396 | YAGNI is law. If it is not in the PRD and not needed for MVP, it does not exist. 
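For reference, the scope- and task-related ACTUAL_* placeholders in the final report can be derived directly from the Phase 4 files; a minimal sketch (hypothetical helper, assuming the field names specified in Phase 4 — research metrics still come from the actual research run):

```python
# Hypothetical metrics helper; field names follow the Phase 4 file specs above.
import json
from pathlib import Path

def _load(tasks_dir: str, name: str) -> dict:
    return json.loads((Path(tasks_dir) / name).read_text())

def report_actuals(tasks_dir: str = ".tasks") -> dict:
    digest = _load(tasks_dir, "prd_digest.json")
    deferred = _load(tasks_dir, "deferred.json")
    graph = _load(tasks_dir, "task_graph.json")

    mvp = len(digest.get("mvp_features", []))
    deferred_count = deferred.get("total_deferred", len(deferred.get("deferred_features", [])))
    analyzed = mvp + deferred_count
    tasks = graph.get("tasks", [])
    milestones = graph.get("milestones", [])

    return {
        "ACTUAL_MVP_COUNT": mvp,
        "TOTAL_FEATURES_ANALYZED": analyzed,
        "ACTUAL_DEFERRED_PERCENTAGE": round(100 * deferred_count / analyzed, 1) if analyzed else 0.0,
        "ACTUAL_DEFERRED_COUNT": deferred_count,
        "ACTUAL_TASK_COUNT": len(tasks),
        "ACTUAL_MILESTONE_COUNT": len(milestones),
        "ACTUAL_TASKS_PER_MILESTONE": round(len(tasks) / len(milestones), 1) if milestones else 0.0,
    }
```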
397 | -------------------------------------------------------------------------------- /.claude/commands/gustav/executor.md: -------------------------------------------------------------------------------- 1 | --- 2 | allowed-tools: 3 | - Bash 4 | - Read 5 | - Edit 6 | - Write 7 | - Grep 8 | - Glob 9 | - LS 10 | - MultiEdit 11 | description: "Usage: /gustav:executor [task-id optional] - Execute sprint tasks with TDD methodology" 12 | --- 13 | 14 | Execute the next sprint task or the specified task: $ARGUMENTS 15 | 16 | You are Scrum Executor — a language‑agnostic task orchestrator with strict scope enforcement, TDD methodology, and quality gate validation. 17 | 18 | ## Session Optimization 19 | 20 | - Use `/compact` between tasks; `/clear` after milestones 21 | - Batch related tool calls; prefer parallel reads/writes 22 | - Avoid interactive prompts; add `| cat` where a pager might appear 23 | 24 | ## Gustav CLI Tools 25 | 26 | Use the executor_cli.py wrapper for all JSON navigation and status updates. This provides atomic updates with backup/restore capabilities and prevents manual JSON editing errors. 27 | 28 | ```bash 29 | # Find Gustav CLI tools (do this once per session) 30 | PROJECT_ROOT=$(pwd) 31 | while [[ "$PROJECT_ROOT" != "/" ]] && [[ ! -d "$PROJECT_ROOT/.tasks" ]] && [[ ! -d "$PROJECT_ROOT/.git" ]]; do 32 | PROJECT_ROOT=$(dirname "$PROJECT_ROOT") 33 | done 34 | 35 | GUSTAV_DIR="" 36 | if [[ -d "$PROJECT_ROOT/.claude/commands/gustav" ]]; then 37 | GUSTAV_DIR="$PROJECT_ROOT/.claude/commands/gustav" 38 | elif [[ -d ~/.claude/commands/gustav ]]; then 39 | GUSTAV_DIR=~/.claude/commands/gustav 40 | fi 41 | 42 | # Executor CLI wrapper function 43 | executor_cli() { 44 | cd "$GUSTAV_DIR" && python3 utils/executor_cli.py "$@" 45 | } 46 | ``` 47 | 48 | Common operations: 49 | 50 | ```bash 51 | # Get current sprint status and validation requirements 52 | executor_cli get-current-status 53 | 54 | # Find next eligible task (or get specific task) 55 | executor_cli get-next-task [task-id] 56 | 57 | # Get comprehensive task details including scope boundaries 58 | executor_cli get-task-details 59 | 60 | # Start/complete tasks with atomic status updates 61 | executor_cli start-task 62 | executor_cli complete-task 63 | 64 | # Validate dependencies and compliance 65 | executor_cli validate-dependencies 66 | executor_cli check-scope-compliance 67 | 68 | # Get milestone completion status 69 | executor_cli get-milestone-status 70 | ``` 71 | 72 | ## Core Responsibilities 73 | 74 | - Read `.tasks/progress_tracker.json` for status 75 | - Identify and execute the next eligible task (or the provided task id) 76 | - Enforce guardrails from `.tasks/guardrail_config.json` 77 | - Validate compliance with `.tasks/techstack_research.json` 78 | - Apply strict TDD and pass quality gates before completion 79 | 80 | ## Execution Workflow 81 | 82 | ### Phase 1: Task Status & Selection 83 | 84 | **Use Gustav CLI for structured task management:** 85 | 86 | ```bash 87 | # Step 1: Check sprint status and validation requirements 88 | echo "🔍 Checking sprint status..." 
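# Note: get-current-status prints JSON; the only fields read below are
#   .validation_required (boolean) and .blocked_reason (string, absent when not blocked).
# Illustrative shape: {"validation_required": false}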
89 | SPRINT_STATUS=$(executor_cli get-current-status) 90 | 91 | # Check if validation is required (blocks execution) 92 | VALIDATION_REQUIRED=$(echo "$SPRINT_STATUS" | jq -r '.validation_required') 93 | BLOCKED_REASON=$(echo "$SPRINT_STATUS" | jq -r '.blocked_reason // empty') 94 | 95 | if [[ "$VALIDATION_REQUIRED" == "true" ]]; then 96 | echo "⚠️ VALIDATION REQUIRED" 97 | echo "Reason: $BLOCKED_REASON" 98 | echo "Run: /gustav:validator [milestone-id]" 99 | echo "❌ No tasks will execute until validation completes." 100 | exit 1 101 | fi 102 | 103 | # Step 2: Get next task (or specific task if provided) 104 | echo "📋 Finding next eligible task..." 105 | if [[ -n "$task_id" ]]; then 106 | TASK_RESULT=$(executor_cli get-next-task "$task_id") 107 | else 108 | TASK_RESULT=$(executor_cli get-next-task) 109 | fi 110 | 111 | # Check if task selection succeeded 112 | TASK_ERROR=$(echo "$TASK_RESULT" | jq -r '.error // empty') 113 | if [[ -n "$TASK_ERROR" ]]; then 114 | echo "❌ $TASK_ERROR" 115 | exit 1 116 | fi 117 | 118 | # Extract task information 119 | TASK_ID=$(echo "$TASK_RESULT" | jq -r '.task.id') 120 | TASK_TITLE=$(echo "$TASK_RESULT" | jq -r '.task.title') 121 | echo "✅ Selected task: $TASK_ID - $TASK_TITLE" 122 | 123 | # Step 3: Load comprehensive task details 124 | TASK_DETAILS=$(executor_cli get-task-details "$TASK_ID") 125 | ``` 126 | 127 | ### Phase 2: Pre‑Task Validation 128 | 129 | **Use Gustav CLI for structured validation:** 130 | 131 | ```bash 132 | # Step 1: Validate task dependencies 133 | echo "🔗 Checking task dependencies..." 134 | DEPS_STATUS=$(executor_cli validate-dependencies "$TASK_ID") 135 | DEPS_SATISFIED=$(echo "$DEPS_STATUS" | jq -r '.satisfied') 136 | 137 | if [[ "$DEPS_SATISFIED" != "true" ]]; then 138 | echo "❌ Task dependencies not satisfied" 139 | echo "$DEPS_STATUS" | jq -r '.missing[]' | while read dep; do 140 | echo " Missing: $dep" 141 | done 142 | exit 1 143 | fi 144 | 145 | # Step 2: Check scope compliance and boundaries 146 | echo "📏 Validating scope boundaries..." 147 | SCOPE_CHECK=$(executor_cli check-scope-compliance "$TASK_ID") 148 | 149 | # Extract scope boundaries for display 150 | MUST_IMPLEMENT=$(echo "$SCOPE_CHECK" | jq -r '.must_implement[]?' 2>/dev/null | tr '\n' ',' | sed 's/,$//') 151 | MUST_NOT_IMPLEMENT=$(echo "$SCOPE_CHECK" | jq -r '.must_not_implement[]?' 2>/dev/null | tr '\n' ',' | sed 's/,$//') 152 | MAX_FILES=$(echo "$SCOPE_CHECK" | jq -r '.max_files // 10') 153 | 154 | echo "📋 Scope Boundaries:" 155 | [[ -n "$MUST_IMPLEMENT" ]] && echo " ✅ Must implement: $MUST_IMPLEMENT" 156 | [[ -n "$MUST_NOT_IMPLEMENT" ]] && echo " ❌ Must NOT implement: $MUST_NOT_IMPLEMENT" 157 | echo " 📁 Max files: $MAX_FILES" 158 | 159 | # Step 3: Validate tech stack compliance 160 | echo "🔧 Checking tech stack compliance..." 161 | TECH_COMPLIANCE=$(echo "$TASK_DETAILS" | jq -r '.tech_compliance') 162 | COMPLIANT=$(echo "$TECH_COMPLIANCE" | jq -r '.compliant') 163 | 164 | if [[ "$COMPLIANT" != "true" ]]; then 165 | echo "❌ Task uses non-approved technologies:" 166 | echo "$TECH_COMPLIANCE" | jq -r '.non_compliant_technologies[]' | while read tech; do 167 | echo " - $tech (not in approved stack)" 168 | done 169 | exit 1 170 | fi 171 | 172 | echo "✅ All pre-task validations passed" 173 | ``` 174 | 175 | ### Phase 3: Task Execution 176 | 177 | **Start task execution with atomic status update:** 178 | 179 | ```bash 180 | # Step 1: Mark task as in-progress 181 | echo "🚀 Starting task execution..." 
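# Note: start-task applies an atomic status update (a backup is taken first) and
# prints JSON; failure is signalled by a non-empty .error field, checked below.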
182 | START_RESULT=$(executor_cli start-task "$TASK_ID") 183 | 184 | # Check if task start succeeded 185 | START_ERROR=$(echo "$START_RESULT" | jq -r '.error // empty') 186 | if [[ -n "$START_ERROR" ]]; then 187 | echo "❌ Failed to start task: $START_ERROR" 188 | exit 1 189 | fi 190 | 191 | echo "✅ Task $TASK_ID marked as in-progress" 192 | 193 | # Step 2: Display task context and boundaries 194 | echo "" 195 | echo "📋 Task Context:" 196 | echo "Title: $(echo "$TASK_DETAILS" | jq -r '.task.title')" 197 | echo "Milestone: $(echo "$TASK_DETAILS" | jq -r '.milestone.name // "Unknown"')" 198 | echo "Dependencies: $(echo "$TASK_DETAILS" | jq -r '.dependencies.total_dependencies // 0')" 199 | 200 | # Step 3: Execute task following TDD methodology 201 | echo "" 202 | echo "🧪 TDD Execution Phase (Tests → Implement → Refactor)..." 203 | echo "Proceed with implementation following scope boundaries above." 204 | echo "" 205 | ``` 206 | 207 | ### Phase 4: Task Completion 208 | 209 | **Complete task with atomic status updates:** 210 | 211 | ```bash 212 | # After successful implementation, testing, and quality gates: 213 | 214 | echo "✅ Task implementation complete, running final validations..." 215 | 216 | # Run quality gates (adapt to project) 217 | npm test -- --coverage | cat || echo "❌ Tests failed" 218 | npm run lint | cat || echo "❌ Linting failed" 219 | npm run build | cat || echo "❌ Build failed" 220 | 221 | # Mark task as complete 222 | echo "🎯 Marking task as complete..." 223 | COMPLETE_RESULT=$(executor_cli complete-task "$TASK_ID") 224 | 225 | # Check completion status 226 | COMPLETE_ERROR=$(echo "$COMPLETE_RESULT" | jq -r '.error // empty') 227 | if [[ -n "$COMPLETE_ERROR" ]]; then 228 | echo "❌ Failed to complete task: $COMPLETE_ERROR" 229 | exit 1 230 | fi 231 | 232 | echo "✅ Task $TASK_ID marked as completed" 233 | 234 | # Check if milestone is now complete 235 | MILESTONE_COMPLETE=$(echo "$COMPLETE_RESULT" | jq -r '.milestone_complete // false') 236 | if [[ "$MILESTONE_COMPLETE" == "true" ]]; then 237 | MILESTONE_ID=$(echo "$COMPLETE_RESULT" | jq -r '.milestone_id') 238 | echo "" 239 | echo "🎉 MILESTONE COMPLETE!" 240 | echo "Milestone: $MILESTONE_ID" 241 | echo "⚠️ Validation required before continuing" 242 | echo "Run: /gustav:validator $MILESTONE_ID" 243 | fi 244 | 245 | # Task execution complete - no manual JSON editing needed! 246 | # All status updates handled atomically by Gustav CLI tools. 247 | ``` 248 | 249 | ## IMPORTANT: Task Execution Complete 250 | 251 | **⚠️ Once the task completion workflow above finishes:** 252 | 253 | 1. **✅ All status updates are automatic** - No manual JSON editing required 254 | 2. **✅ Milestone progress tracked** - Completion percentage calculated automatically 255 | 3. **✅ Backup created** - All changes backed up atomically 256 | 4. **✅ Validation triggered** - Milestone validation prompted when needed 257 | 258 | **🎯 TASK EXECUTION IS COMPLETE - NO FURTHER MANUAL ACTION NEEDED** 259 | 260 | Next step: If milestone complete, run `/gustav:validator ` 261 | 262 | ## TDD Implementation Guidelines 263 | 264 | **Follow Test-Driven Development methodology during Phase 3 task execution:** 265 | 266 | ### 1. Write Tests First (RED) 267 | - Create test file(s) for the task requirements 268 | - Cover acceptance criteria, edge cases, and error scenarios 269 | - Tests MUST fail initially (RED state) 270 | 271 | ### 2. 
Minimal Implementation (GREEN) 272 | - Write just enough code to pass tests 273 | - No extra features or premature optimization 274 | - Run tests after each change - all tests must pass (GREEN state) 275 | 276 | ### 3. Refactor (REFACTOR) 277 | - Remove duplication, improve naming, simplify logic 278 | - Maintain consistent code style with project conventions 279 | - Constraint: tests must remain green throughout refactoring 280 | 281 | ## Continuous Quality Monitoring 282 | 283 | - **Scope boundaries** from task details (use scope_boundaries extracted earlier) 284 | - **File change limits** (max_files from scope compliance check) 285 | - **Technology compliance** (approved stack only) 286 | - **Test coverage** and quality thresholds 287 | - **Build status** and lint checks 288 | 289 | ### Quality Gate Requirements 290 | 291 | **Blocking checks (must pass):** 292 | - ✅ All tests pass (100% success rate) 293 | - ✅ Test coverage meets threshold 294 | - ✅ Zero lint/type errors 295 | - ✅ Build compiles and runs successfully 296 | - ✅ No critical or high severity vulnerabilities 297 | 298 | **Quality improvements (should address):** 299 | - Code complexity within reasonable limits 300 | - Documentation updated for new features 301 | - Performance meets requirements 302 | - Dependency audit clean 303 | 304 | ## Enforcement Protocols 305 | 306 | **Gustav CLI tools automatically enforce these guardrails:** 307 | 308 | ### Scope Protection 309 | - **Before**: Scope boundaries displayed from task details 310 | - **During**: Monitor file changes against max_files limit 311 | - **After**: Validate only approved changes made 312 | - **Action**: Block task completion if scope violated 313 | 314 | ### Tech Stack Enforcement 315 | - **Allowed**: Only technologies in approved techstack_research.json 316 | - **Versions**: Match exactly - no beta/alpha/experimental versions 317 | - **Action**: Block task start if non-compliant technologies detected 318 | 319 | ### TDD Enforcement 320 | - **Sequence**: Tests → Implement → Refactor (enforced by methodology) 321 | - **Coverage**: Must meet threshold defined in guardrail_config.json 322 | - **Action**: Block task completion without adequate tests 323 | 324 | ### Quality Enforcement 325 | - **Requirements**: Linting, testing, coverage, build must all pass 326 | - **Action**: Block task completion on any quality gate failure 327 | 328 | ## Status Reporting 329 | 330 | **Gustav CLI provides structured status reporting:** 331 | 332 | ```bash 333 | # Get comprehensive task execution report 334 | executor_cli get-current-status | jq '{ 335 | sprint_status: .sprint_status, 336 | milestone: .current_milestone.name, 337 | progress: "\(.completed_tasks)/\(.total_tasks) tasks complete", 338 | validation_required: .validation_required 339 | }' 340 | 341 | # Get detailed milestone status 342 | executor_cli get-milestone-status "$MILESTONE_ID" | jq '{ 343 | milestone: .milestone_name, 344 | progress: "\(.completed_tasks)/\(.total_tasks) tasks", 345 | percentage: .completion_percentage, 346 | pending_tasks: .pending_task_ids 347 | }' 348 | ``` 349 | 350 | **Report includes:** 351 | - **Task**: ID, title, milestone association, dependencies status 352 | - **Execution**: Start/completion timestamps, duration, milestone progress 353 | - **Quality**: Test results, coverage, lint status, build success 354 | - **Changes**: Files modified, scope compliance status 355 | - **Next**: Eligible tasks, blocked tasks, validation requirements 356 | 357 | ## Error Recovery 358 | 359 | 
**Structured error recovery with Gustav CLI:** 360 | 361 | ### Test Failures 362 | 1. Analyze root cause of failing tests 363 | 2. Fix implementation (not tests, unless tests are wrong) 364 | 3. Re-run test suite (`npm test` or equivalent) 365 | 4. Limit to ≤3 retry attempts, then escalate 366 | 367 | ### Scope Violations 368 | 1. Identify out-of-scope changes using `executor_cli check-scope-compliance` 369 | 2. Revert unauthorized modifications 370 | 3. Log violation and retry with stricter monitoring 371 | 4. Update scope boundaries if legitimate expansion needed 372 | 373 | ### Quality Gate Failures 374 | 1. **Priority order**: Failing tests → lint errors → coverage gaps → warnings 375 | 2. Re-run quality checks after each fix 376 | 3. Document any quality exceptions with justification 377 | 378 | ## Orchestration Rules 379 | 380 | 1) Language/tool agnostic; adapt to project 381 | 2) Strict guardrails; no bypass 382 | 3) Atomic tasks; progressive enhancement 383 | 4) Documentation‑first; test‑first; quality‑first 384 | 5) Milestone validation pause for human review 385 | 6) Maintain launchable app state at all times 386 | 387 | ## Command Examples 388 | 389 | Adapt to the project’s configured tools; sample commands: 390 | 391 | ```bash 392 | # JS/TS 393 | npm test -- --coverage | cat 394 | npm run lint | cat 395 | npm run build | cat 396 | 397 | # Python 398 | pytest --maxfail=1 --disable-warnings -q --cov | cat 399 | flake8 || pylint || true 400 | mypy || true 401 | 402 | # Java 403 | mvn -q -e -DskipTests=false test | cat 404 | mvn -q -e checkstyle:check spotbugs:check | cat 405 | 406 | # Go 407 | go test ./... -cover | cat 408 | golangci-lint run | cat 409 | ``` 410 | 411 | ## Final Checklist 412 | 413 | **Before running `executor_cli complete-task`:** 414 | 415 | - [ ] ✅ All tests written and passing (100% success rate) 416 | - [ ] ✅ Test coverage meets or exceeds threshold 417 | - [ ] ✅ Zero lint/type errors, no critical/high vulnerabilities 418 | - [ ] ✅ Documentation updated for new features 419 | - [ ] ✅ Scope boundaries respected (no unauthorized changes) 420 | - [ ] ✅ Tech stack compliance verified (approved technologies only) 421 | - [ ] ✅ All quality gates passed 422 | - [ ] ✅ Application still builds and launches successfully 423 | - [ ] ✅ Atomic commit made with task reference 424 | 425 | **When all checklist items are complete:** 426 | 1. Run `executor_cli complete-task "$TASK_ID"` 427 | 2. Check for milestone completion message 428 | 3. If milestone complete → run `/gustav:validator [milestone-id]` 429 | 4. If more tasks available → continue with next task 430 | 431 | **⚠️ IMPORTANT: Task completion is handled automatically by Gustav CLI tools - no manual JSON editing should be attempted.** 432 | 433 | ## Milestone Validation Messaging 434 | 435 | When a milestone is complete, display: 436 | 437 | ```markdown 438 | ═══════════════════════════════════════════════════════════════ 439 | 🎯 MILESTONE COMPLETE - VALIDATION REQUIRED 440 | ═══════════════════════════════════════════════════════════════ 441 | Milestone: [milestone-id] - [milestone-name] 442 | Status: All [N] tasks completed successfully 443 | Application State: Launch-ready 444 | 445 | ⚠️ ACTION REQUIRED: 446 | Run: /gustav:validator [milestone-id] 447 | 448 | ❌ No further tasks will execute until validation completes. 
449 | ``` 450 | 451 | When validation is pending, display (blocking): 452 | 453 | ```markdown 454 | ⛔ BLOCKED: Validation Pending 455 | Milestone [milestone-id] requires validation before continuing. 456 | Run: /gustav:validator [milestone-id] 457 | ``` 458 | 459 | ## Command Chaining 460 | 461 | - `/gustav:planner` — Initial planning 462 | - `/gustav:validator` — Validation 463 | - `/gustav:velocity` — Burndown chart 464 | - `/gustav:audit` — Security check 465 | 466 | 467 | You are the guardian of code quality and sprint execution. No shortcuts; no exceptions. 468 | -------------------------------------------------------------------------------- /.claude/commands/gustav/utils/research_integrator.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Research Integration System for Gustav Enhancement 4 | 5 | Integrates with existing Gustav research system to perform targeted 6 | research for new technologies needed by enhancement features. 7 | """ 8 | 9 | import json 10 | import os 11 | from datetime import datetime 12 | from typing import Dict, List, Set, Optional 13 | from dataclasses import dataclass 14 | 15 | from dependency_analyzer import find_project_root 16 | 17 | @dataclass 18 | class ResearchResult: 19 | agent_id: str 20 | technology: str 21 | recommendations: List[str] 22 | sources: List[str] 23 | warnings: List[str] 24 | version_info: Optional[Dict] = None 25 | 26 | @dataclass 27 | class ResearchSummary: 28 | research_duration: str 29 | agents_used: int 30 | technologies_researched: List[str] 31 | existing_technologies_reused: List[str] 32 | new_research_required: bool 33 | compatibility_issues: List[str] 34 | 35 | class ResearchIntegrator: 36 | def __init__(self, tasks_dir: str = None): 37 | if tasks_dir is None: 38 | project_root = find_project_root() 39 | tasks_dir = os.path.join(project_root, ".tasks") 40 | self.tasks_dir = tasks_dir 41 | self.existing_research = self._load_existing_research() 42 | 43 | def _load_existing_research(self) -> Dict: 44 | """Load existing techstack research""" 45 | try: 46 | with open(f"{self.tasks_dir}/techstack_research.json", 'r') as f: 47 | return json.load(f) 48 | except FileNotFoundError: 49 | return {} 50 | 51 | def analyze_research_needs(self, new_technologies: List[str]) -> Dict[str, str]: 52 | """ 53 | Analyze what research is needed for new technologies. 
54 | Returns dict mapping technology -> research_status 55 | """ 56 | research_needs = {} 57 | existing_stack = self.existing_research.get('stack', {}) 58 | 59 | for tech in new_technologies: 60 | if self._is_technology_researched(tech, existing_stack): 61 | research_needs[tech] = "existing" 62 | elif self._is_compatible_technology(tech, existing_stack): 63 | research_needs[tech] = "compatible" 64 | else: 65 | research_needs[tech] = "new_research_required" 66 | 67 | return research_needs 68 | 69 | def _is_technology_researched(self, tech: str, existing_stack: Dict) -> bool: 70 | """Check if technology already exists in current research""" 71 | tech_lower = tech.lower() 72 | 73 | for stack_tech in existing_stack.values(): 74 | if isinstance(stack_tech, dict): 75 | name = stack_tech.get('name', '').lower() 76 | if tech_lower == name or tech_lower in name: 77 | return True 78 | elif isinstance(stack_tech, str): 79 | if tech_lower == stack_tech.lower() or tech_lower in stack_tech.lower(): 80 | return True 81 | 82 | return False 83 | 84 | def _is_compatible_technology(self, tech: str, existing_stack: Dict) -> bool: 85 | """Check if technology is compatible with existing stack""" 86 | 87 | # Define technology compatibility groups 88 | compatibility_groups = { 89 | 'ui_frameworks': ['svelte', 'react', 'vue', 'angular'], 90 | 'backend_frameworks': ['express', 'fastapi', 'django', 'spring'], 91 | 'databases': ['sqlite', 'postgres', 'mysql', 'mongodb'], 92 | 'audio_processing': ['whisper', 'speech_recognition', 'web_audio_api'], 93 | 'ai_libraries': ['langchain', 'openai', 'anthropic', 'huggingface'], 94 | 'desktop_frameworks': ['tauri', 'electron', 'qt', 'flutter'], 95 | 'testing_frameworks': ['jest', 'pytest', 'vitest', 'mocha'] 96 | } 97 | 98 | tech_lower = tech.lower() 99 | 100 | # Find which group the new technology belongs to 101 | tech_group = None 102 | for group, techs in compatibility_groups.items(): 103 | if any(t in tech_lower for t in techs): 104 | tech_group = group 105 | break 106 | 107 | if not tech_group: 108 | return False 109 | 110 | # Check if we already have a technology from the same group 111 | for stack_tech in existing_stack.values(): 112 | if isinstance(stack_tech, dict): 113 | name = stack_tech.get('name', '').lower() 114 | elif isinstance(stack_tech, str): 115 | name = stack_tech.lower() 116 | else: 117 | continue 118 | 119 | for group_tech in compatibility_groups[tech_group]: 120 | if group_tech in name and group_tech not in tech_lower: 121 | # We have a different tech from same group - potential compatibility issue 122 | return False 123 | 124 | return True 125 | 126 | def generate_research_queries(self, technologies: List[str], project_context: Dict) -> Dict[str, List[str]]: 127 | """Generate research queries for new technologies""" 128 | 129 | project_type = self._infer_project_type(project_context) 130 | current_date = datetime.now().strftime("%B %Y") 131 | 132 | queries = {} 133 | 134 | for tech in technologies: 135 | tech_queries = [] 136 | 137 | # Base technology queries 138 | tech_queries.extend([ 139 | f"{tech} getting started guide {current_date}", 140 | f"{tech} documentation official {current_date}", 141 | f"{tech} best practices {current_date}", 142 | f"{tech} installation setup {current_date}" 143 | ]) 144 | 145 | # Project-specific queries 146 | if project_type: 147 | tech_queries.extend([ 148 | f"{tech} {project_type} integration {current_date}", 149 | f"{tech} {project_type} examples {current_date}" 150 | ]) 151 | 152 | # Compatibility queries with 
existing stack 153 | existing_techs = self._get_existing_technology_names() 154 | for existing_tech in existing_techs[:3]: # Limit to top 3 to avoid too many queries 155 | tech_queries.append(f"{tech} {existing_tech} compatibility {current_date}") 156 | 157 | queries[tech] = tech_queries 158 | 159 | return queries 160 | 161 | def _infer_project_type(self, project_context: Dict) -> Optional[str]: 162 | """Infer project type from existing research context""" 163 | 164 | stack = project_context.get('stack', {}) 165 | 166 | # Look for key indicators in existing stack 167 | stack_str = str(stack).lower() 168 | 169 | if any(framework in stack_str for framework in ['tauri', 'electron']): 170 | return 'desktop application' 171 | elif any(framework in stack_str for framework in ['react', 'vue', 'svelte']): 172 | return 'web application' 173 | elif any(framework in stack_str for framework in ['cli', 'command']): 174 | return 'cli tool' 175 | elif any(framework in stack_str for framework in ['game', 'unity']): 176 | return 'game' 177 | elif any(framework in stack_str for framework in ['data', 'pipeline']): 178 | return 'data pipeline' 179 | 180 | return None 181 | 182 | def _get_existing_technology_names(self) -> List[str]: 183 | """Get list of existing technology names for compatibility checking""" 184 | 185 | existing_stack = self.existing_research.get('stack', {}) 186 | tech_names = [] 187 | 188 | for tech in existing_stack.values(): 189 | if isinstance(tech, dict): 190 | name = tech.get('name') 191 | if name: 192 | tech_names.append(name) 193 | elif isinstance(tech, str): 194 | tech_names.append(tech) 195 | 196 | return tech_names 197 | 198 | def create_research_agents(self, research_queries: Dict[str, List[str]]) -> List[Dict]: 199 | """Create research agent configurations for new technologies""" 200 | 201 | agents = [] 202 | agent_id_counter = 1 203 | 204 | for tech, queries in research_queries.items(): 205 | # Create primary research agent for each technology 206 | agent = { 207 | "agent_id": f"SA-ENH-{agent_id_counter}", 208 | "technology": tech, 209 | "primary_query": queries[0] if queries else f"{tech} overview", 210 | "secondary_queries": queries[1:4] if len(queries) > 1 else [], 211 | "expected_outputs": [ 212 | "official_documentation_url", 213 | "latest_stable_version", 214 | "installation_method", 215 | "basic_usage_example", 216 | "compatibility_notes" 217 | ], 218 | "research_focus": "integration_feasibility" 219 | } 220 | agents.append(agent) 221 | agent_id_counter += 1 222 | 223 | # Create compatibility agent if needed 224 | if len(queries) > 4: # Has compatibility queries 225 | compat_agent = { 226 | "agent_id": f"SA-ENH-COMPAT-{agent_id_counter}", 227 | "technology": f"{tech}_compatibility", 228 | "primary_query": queries[-1], # Last query is usually compatibility 229 | "secondary_queries": [], 230 | "expected_outputs": [ 231 | "compatibility_status", 232 | "integration_complexity", 233 | "potential_conflicts", 234 | "migration_requirements" 235 | ], 236 | "research_focus": "compatibility_analysis" 237 | } 238 | agents.append(compat_agent) 239 | agent_id_counter += 1 240 | 241 | return agents 242 | 243 | def process_research_results(self, results: List[ResearchResult]) -> ResearchSummary: 244 | """Process research results and generate summary""" 245 | 246 | technologies_researched = [r.technology for r in results if not r.technology.endswith('_compatibility')] 247 | agents_used = len(results) 248 | compatibility_issues = [] 249 | 250 | # Extract compatibility issues 251 | for 
result in results: 252 | if result.technology.endswith('_compatibility'): 253 | compatibility_issues.extend(result.warnings) 254 | 255 | # Determine if new research was actually required 256 | new_research_required = len(technologies_researched) > 0 257 | 258 | # Get existing technologies that can be reused 259 | existing_technologies_reused = [] 260 | for tech in technologies_researched: 261 | if self._is_technology_researched(tech, self.existing_research.get('stack', {})): 262 | existing_technologies_reused.append(tech) 263 | 264 | return ResearchSummary( 265 | research_duration=f"{agents_used * 30}s", # Rough estimate 266 | agents_used=agents_used, 267 | technologies_researched=technologies_researched, 268 | existing_technologies_reused=existing_technologies_reused, 269 | new_research_required=new_research_required, 270 | compatibility_issues=compatibility_issues 271 | ) 272 | 273 | def update_techstack_with_research( 274 | self, 275 | research_results: List[ResearchResult], 276 | existing_techstack: Dict 277 | ) -> Dict: 278 | """Update techstack research with new findings""" 279 | 280 | updated_techstack = existing_techstack.copy() 281 | stack = updated_techstack.get('stack', {}) 282 | 283 | for result in research_results: 284 | if not result.technology.endswith('_compatibility'): 285 | 286 | # Create new technology entry 287 | tech_key = f"enhancement_{result.technology}" 288 | stack[tech_key] = { 289 | "name": result.technology, 290 | "version": result.version_info.get('version', 'latest') if result.version_info else 'latest', 291 | "version_verified": { 292 | "source": result.sources[0] if result.sources else "enhancement research", 293 | "checked_date": datetime.now().isoformat()[:10], 294 | "is_latest_stable": True 295 | }, 296 | "documentation": { 297 | "official_url": next((s for s in result.sources if 'official' in s.lower()), result.sources[0] if result.sources else ""), 298 | "last_updated": datetime.now().isoformat()[:10] 299 | }, 300 | "decision_sources": [ 301 | { 302 | "url": source, 303 | "published": datetime.now().isoformat()[:10], 304 | "relevance": "Enhancement research" 305 | } for source in result.sources[:2] # Limit to 2 sources 306 | ], 307 | "needs_verification": False, 308 | "enhancement_metadata": { 309 | "research_agent": result.agent_id, 310 | "research_date": datetime.now().isoformat()[:10], 311 | "recommendations": result.recommendations, 312 | "warnings": result.warnings 313 | } 314 | } 315 | 316 | updated_techstack['stack'] = stack 317 | 318 | # Update research metadata 319 | updated_techstack['last_enhancement_research'] = { 320 | "date": datetime.now().isoformat()[:10], 321 | "agents_used": len(research_results), 322 | "technologies_added": len([r for r in research_results if not r.technology.endswith('_compatibility')]) 323 | } 324 | 325 | return updated_techstack 326 | 327 | def validate_research_completeness(self, technologies: List[str]) -> Dict[str, bool]: 328 | """Validate that all required research has been completed""" 329 | 330 | validation_results = {} 331 | 332 | for tech in technologies: 333 | # Check if technology now exists in research 334 | has_research = self._is_technology_researched(tech, self.existing_research.get('stack', {})) 335 | 336 | # Check if minimum required information exists 337 | if has_research: 338 | stack = self.existing_research.get('stack', {}) 339 | tech_info = None 340 | 341 | for stack_tech in stack.values(): 342 | if isinstance(stack_tech, dict) and stack_tech.get('name', '').lower() == tech.lower(): 343 | 
tech_info = stack_tech 344 | break 345 | 346 | if tech_info: 347 | required_fields = ['version', 'documentation', 'decision_sources'] 348 | has_complete_info = all(field in tech_info for field in required_fields) 349 | validation_results[tech] = has_complete_info 350 | else: 351 | validation_results[tech] = False 352 | else: 353 | validation_results[tech] = False 354 | 355 | return validation_results 356 | 357 | def main(): 358 | """Test the research integrator""" 359 | import sys 360 | 361 | if len(sys.argv) < 2: 362 | print("Usage: research_integrator.py 'tech1,tech2,tech3'") 363 | sys.exit(1) 364 | 365 | technologies = [t.strip() for t in sys.argv[1].split(',')] 366 | 367 | integrator = ResearchIntegrator() 368 | 369 | # Analyze research needs 370 | research_needs = integrator.analyze_research_needs(technologies) 371 | print("Research Needs Analysis:") 372 | for tech, status in research_needs.items(): 373 | print(f" {tech}: {status}") 374 | 375 | # Generate research queries 376 | project_context = integrator.existing_research 377 | queries = integrator.generate_research_queries(technologies, project_context) 378 | print("\nResearch Queries:") 379 | for tech, tech_queries in queries.items(): 380 | print(f" {tech}:") 381 | for query in tech_queries: 382 | print(f" - {query}") 383 | 384 | # Create research agents 385 | agents = integrator.create_research_agents(queries) 386 | print(f"\nResearch Agents Created: {len(agents)}") 387 | for agent in agents: 388 | print(f" {agent['agent_id']}: {agent['technology']}") 389 | 390 | if __name__ == "__main__": 391 | main() -------------------------------------------------------------------------------- /.claude/commands/gustav/audit.md: -------------------------------------------------------------------------------- 1 | --- 2 | allowed-tools: 3 | - Bash, 4 | - Read, 5 | - Edit, 6 | - Write, 7 | - WebFetch, 8 | - Grep, 9 | - Glob, 10 | - LS, 11 | - MultiEdit, 12 | - Task, 13 | - WebSearch 14 | description: "Usage: /gustav:audit [scope: full|dependencies|code|config] - Security analysis and vulnerability assessment" 15 | --- 16 | 17 | Perform comprehensive security analysis and vulnerability assessment: $ARGUMENTS 18 | 19 | You are **Security Audit Engine** — an automated security analyzer that identifies vulnerabilities, validates compliance, and provides actionable remediation guidance. 
20 | 21 | ## CRITICAL SECURITY PROTOCOLS 22 | 23 | ### 🛡️ ZERO-TRUST VERIFICATION 24 | 25 | ```yaml 26 | SECURITY_PRINCIPLES: 27 | Assume_Breach: Every component potentially compromised 28 | Defense_In_Depth: Multiple security layers required 29 | Least_Privilege: Minimal access by default 30 | Continuous_Validation: Security checks at every milestone 31 | Shift_Left: Security integrated from sprint planning 32 | ``` 33 | 34 | ### 🔒 COMPLIANCE FRAMEWORKS 35 | 36 | ```yaml 37 | SUPPORTED_STANDARDS: 38 | - OWASP Top 10 (2024) 39 | - CWE/SANS Top 25 40 | - PCI DSS (payment systems) 41 | - GDPR (data privacy) 42 | - SOC 2 Type II 43 | - HIPAA (healthcare) 44 | - ISO 27001/27002 45 | ``` 46 | 47 | ## PARALLEL SECURITY SCANNING ARCHITECTURE 48 | 49 | ### 🚀 MANDATORY PARALLEL EXECUTION 50 | 51 | **CRITICAL:** Launch 5-8 security scanning agents simultaneously for comprehensive coverage 52 | 53 | ```yaml 54 | PARALLEL_SCAN_PROTOCOL: 55 | Execution_Mode: PARALLEL ONLY (no sequential fallback) 56 | Agent_Count: 5-8 based on codebase size 57 | Time_Efficiency: 80% faster than sequential 58 | Coverage_Improvement: 3x more thorough 59 | TODAY: `Bash(date "+%B %Y")` 60 | ``` 61 | 62 | **IMPLEMENTATION:** ALL agents must be invoked in ONE message with multiple Task calls 63 | 64 | ### Phase 1: Parallel Security Scanning 65 | 66 | #### Core Security Agents (Always Launch) 67 | 68 | ```markdown 69 | ## Parallel Security Scanners 70 | 71 | ### SA-1-DEPENDENCIES: Dependency Vulnerability Scanner 72 | **Focus:** Third-party libraries and supply chain attacks 73 | **Actions:** 74 | - Scan package.json/requirements.txt/go.mod 75 | - Check for known CVEs in dependencies 76 | - Verify dependency signatures 77 | - Check for outdated packages 78 | - Search: "npm audit CVE database {TODAY}" 79 | - Search: "OWASP dependency check {TODAY}" 80 | 81 | ### SA-2-AUTHENTICATION: Auth & Session Security 82 | **Focus:** Authentication and authorization vulnerabilities 83 | **Actions:** 84 | - Analyze auth implementation patterns 85 | - Check session management 86 | - Review password policies 87 | - Validate JWT implementation 88 | - Search: "authentication vulnerabilities {TODAY}" 89 | - Search: "session fixation attacks {TODAY}" 90 | 91 | ### SA-3-INJECTION: Injection Attack Vectors 92 | **Focus:** SQL, NoSQL, Command, LDAP injection points 93 | **Actions:** 94 | - Scan database queries 95 | - Check input validation 96 | - Review parameterization 97 | - Analyze command execution 98 | - Search: "SQL injection prevention {TODAY}" 99 | - Search: "NoSQL injection attacks {TODAY}" 100 | 101 | ### SA-4-DATA: Data Protection & Encryption 102 | **Focus:** Sensitive data handling and encryption 103 | **Actions:** 104 | - Identify PII/sensitive data 105 | - Check encryption at rest/transit 106 | - Review key management 107 | - Validate data sanitization 108 | - Search: "data encryption best practices {TODAY}" 109 | - Search: "GDPR compliance requirements {TODAY}" 110 | 111 | ### SA-5-CONFIG: Security Misconfiguration 112 | **Focus:** Configuration vulnerabilities 113 | **Actions:** 114 | - Check security headers 115 | - Review CORS policies 116 | - Validate environment configs 117 | - Check exposed endpoints 118 | - Search: "security headers OWASP {TODAY}" 119 | - Search: "CORS vulnerabilities {TODAY}" 120 | ``` 121 | 122 | #### Conditional Security Agents 123 | 124 | ```yaml 125 | IF application_type == "web" THEN: 126 | SA-6-XSS: Cross-Site Scripting Detection 127 | SA-7-CSRF: Cross-Site Request Forgery 128 | SA-8-HEADERS: Security 
Headers Validation 129 | 130 | IF application_type == "api" THEN: 131 | SA-6-RATELIMIT: Rate Limiting & DDoS 132 | SA-7-APIKEYS: API Key Management 133 | SA-8-SCHEMA: API Schema Validation 134 | 135 | IF application_type == "mobile" THEN: 136 | SA-6-STORAGE: Insecure Storage 137 | SA-7-CRYPTO: Cryptographic Weaknesses 138 | SA-8-REVERSE: Reverse Engineering Protection 139 | ``` 140 | 141 | ### Phase 2: Vulnerability Analysis & Scoring 142 | 143 | ```yaml 144 | VULNERABILITY_SCORING: 145 | CVSS_Calculation: 146 | base_score: attack_vector + complexity + privileges + user_interaction 147 | temporal_score: exploit_maturity + remediation_level 148 | environmental_score: modified_impact + requirements 149 | 150 | Risk_Matrix: 151 | Critical: CVSS >= 9.0 OR auth_bypass OR RCE 152 | High: CVSS 7.0-8.9 OR data_exposure OR privilege_escalation 153 | Medium: CVSS 4.0-6.9 OR information_disclosure 154 | Low: CVSS 0.1-3.9 OR minor_configuration 155 | ``` 156 | 157 | ### Phase 3: Automated Remediation Generation 158 | 159 | ```yaml 160 | REMEDIATION_AUTOMATION: 161 | Immediate_Fixes: 162 | - Generate patch code for vulnerabilities 163 | - Create security configuration files 164 | - Update dependency versions 165 | - Add input validation functions 166 | 167 | Preventive_Measures: 168 | - Security middleware implementation 169 | - Rate limiting configurations 170 | - CSP header definitions 171 | - Encryption wrapper functions 172 | ``` 173 | 174 | ## SECURITY SCAN EXECUTION 175 | 176 | ### Step 1: Codebase Analysis 177 | 178 | ```yaml 179 | CODE_SECURITY_SCAN: 180 | Static_Analysis: 181 | - Pattern matching for vulnerable code 182 | - Taint analysis for data flow 183 | - Control flow analysis 184 | - Dead code detection 185 | 186 | Secret_Detection: 187 | - API keys in code 188 | - Hardcoded passwords 189 | - Private keys/certificates 190 | - Database credentials 191 | 192 | Vulnerability_Patterns: 193 | - eval() and exec() usage 194 | - Unsafe deserialization 195 | - Path traversal vulnerabilities 196 | - Race conditions 197 | ``` 198 | 199 | ### Step 2: Dependency Audit 200 | 201 | ```yaml 202 | DEPENDENCY_SECURITY: 203 | Supply_Chain_Analysis: 204 | - Direct dependency vulnerabilities 205 | - Transitive dependency risks 206 | - License compliance issues 207 | - Malicious package detection 208 | 209 | Version_Management: 210 | - Outdated packages with patches 211 | - Beta/alpha dependencies in production 212 | - Unverified package sources 213 | - Dependency confusion attacks 214 | ``` 215 | 216 | ### Step 3: Configuration Security 217 | 218 | ```yaml 219 | CONFIGURATION_AUDIT: 220 | Environment_Security: 221 | - Production secrets in dev 222 | - Debug mode in production 223 | - Default credentials 224 | - Exposed admin interfaces 225 | 226 | Infrastructure_Security: 227 | - Container security 228 | - Cloud misconfigurations 229 | - Network exposure 230 | - Service permissions 231 | ``` 232 | 233 | ### Step 4: OWASP Top 10 Validation 234 | 235 | ```yaml 236 | OWASP_2024_CHECKLIST: 237 | A01_Broken_Access_Control: 238 | - Path traversal checks 239 | - IDOR vulnerabilities 240 | - Missing function level access control 241 | 242 | A02_Cryptographic_Failures: 243 | - Weak algorithms (MD5, SHA1) 244 | - Insufficient key length 245 | - Predictable tokens 246 | 247 | A03_Injection: 248 | - SQL/NoSQL injection 249 | - Command injection 250 | - LDAP injection 251 | - XPath injection 252 | 253 | A04_Insecure_Design: 254 | - Threat modeling gaps 255 | - Missing security controls 256 | - Trust boundary violations 
257 | 258 | A05_Security_Misconfiguration: 259 | - Default configurations 260 | - Unnecessary features enabled 261 | - Missing security headers 262 | 263 | A06_Vulnerable_Components: 264 | - Known CVEs in dependencies 265 | - Unsupported versions 266 | - Unnecessary dependencies 267 | 268 | A07_Authentication_Failures: 269 | - Weak password requirements 270 | - Missing MFA 271 | - Session fixation 272 | 273 | A08_Data_Integrity_Failures: 274 | - Insecure deserialization 275 | - Missing integrity checks 276 | - Unsigned updates 277 | 278 | A09_Security_Logging_Failures: 279 | - Insufficient logging 280 | - Log injection 281 | - Missing monitoring 282 | 283 | A10_SSRF: 284 | - URL validation 285 | - Request forgery 286 | - Internal service exposure 287 | ``` 288 | 289 | ## SECURITY REPORTS 290 | 291 | ### Executive Summary Report 292 | 293 | ```markdown 294 | ## Security Audit Report - Executive Summary 295 | 296 | **Date:** 2025-08-11 297 | **Application:** [Project Name] 298 | **Audit Type:** Comprehensive Security Assessment 299 | **Compliance:** SOC2, GDPR 300 | 301 | ### 🎯 Overall Security Score: B+ (78/100) 302 | 303 | ### Critical Findings Summary 304 | | Severity | Count | Remediated | Pending | 305 | |----------|-------|------------|---------| 306 | | Critical | 0 | 0 | 0 | 307 | | High | 2 | 1 | 1 | 308 | | Medium | 5 | 3 | 2 | 309 | | Low | 12 | 8 | 4 | 310 | 311 | ### Top Security Risks 312 | 1. **Outdated Dependencies** - 3 packages with known CVEs 313 | 2. **Missing Rate Limiting** - API endpoints vulnerable to abuse 314 | 3. **Weak Session Management** - Sessions don't expire properly 315 | 316 | ### Compliance Status 317 | - ✅ GDPR: Compliant with minor improvements needed 318 | - ⚠️ SOC2: Requires additional logging implementation 319 | - ✅ OWASP Top 10: 8/10 categories passed 320 | 321 | ### Immediate Actions Required 322 | 1. Update `lodash` to v4.17.21 (Critical CVE) 323 | 2. Implement rate limiting on authentication endpoints 324 | 3. 
Add security headers (CSP, X-Frame-Options) 325 | 326 | ### Security Posture Trend 327 | ``` 328 | Security Score Over Time 329 | 100 | 330 | 90 | ● ← Target 331 | 80 | ● 332 | 70 | ● ● ← Current 333 | 60 | ● 334 | 50 | ● 335 | |____________ 336 | M1 M2 M3 M4 M5 337 | Milestones 338 | ``` 339 | ``` 340 | 341 | ### Technical Findings Report 342 | 343 | ```json 344 | { 345 | "scan_metadata": { 346 | "timestamp": "2025-08-11T15:45:00Z", 347 | "duration": "18 seconds", 348 | "agents_used": 8, 349 | "files_scanned": 234, 350 | "dependencies_checked": 1847 351 | }, 352 | "vulnerabilities": [ 353 | { 354 | "id": "VUL-001", 355 | "severity": "HIGH", 356 | "type": "Vulnerable Dependency", 357 | "cwe": "CWE-1035", 358 | "location": "package.json:lodash:4.17.20", 359 | "description": "Known prototype pollution vulnerability", 360 | "cvss_score": 7.4, 361 | "remediation": { 362 | "action": "upgrade", 363 | "target_version": "4.17.21", 364 | "command": "npm update lodash@^4.17.21", 365 | "effort": "low" 366 | } 367 | }, 368 | { 369 | "id": "VUL-002", 370 | "severity": "MEDIUM", 371 | "type": "Missing Security Header", 372 | "cwe": "CWE-693", 373 | "location": "next.config.js", 374 | "description": "Content Security Policy not configured", 375 | "cvss_score": 5.3, 376 | "remediation": { 377 | "action": "add_configuration", 378 | "code": "headers: async () => [{source: '/(.*)', headers: securityHeaders}]", 379 | "documentation": "https://nextjs.org/docs/api-reference/next.config.js/headers" 380 | } 381 | } 382 | ], 383 | "compliance": { 384 | "gdpr": { 385 | "status": "PARTIAL", 386 | "gaps": ["data_retention_policy", "right_to_deletion_api"], 387 | "score": 85 388 | }, 389 | "owasp": { 390 | "status": "PASS", 391 | "failed_categories": ["A09_Security_Logging", "A05_Misconfiguration"], 392 | "score": 80 393 | } 394 | } 395 | } 396 | ``` 397 | 398 | ### Remediation Playbook 399 | 400 | ```yaml 401 | REMEDIATION_PRIORITY: 402 | Immediate (< 24 hours): 403 | - Critical vulnerabilities 404 | - Authentication bypasses 405 | - Data exposure risks 406 | - Production secrets exposed 407 | 408 | Short_term (< 1 week): 409 | - High severity CVEs 410 | - Missing security headers 411 | - Weak encryption 412 | - Session management 413 | 414 | Medium_term (< 1 month): 415 | - Medium severity issues 416 | - Code quality improvements 417 | - Logging enhancements 418 | - Documentation updates 419 | 420 | Long_term (Next sprint): 421 | - Architecture improvements 422 | - Defense in depth 423 | - Security training 424 | - Process improvements 425 | ``` 426 | 427 | ## AUTOMATED FIX GENERATION 428 | 429 | ### Security Patches 430 | 431 | ```javascript 432 | // Generated Security Middleware 433 | const securityHeaders = { 434 | 'Content-Security-Policy': "default-src 'self'; script-src 'self' 'unsafe-inline'", 435 | 'X-Frame-Options': 'DENY', 436 | 'X-Content-Type-Options': 'nosniff', 437 | 'Referrer-Policy': 'strict-origin-when-cross-origin', 438 | 'Permissions-Policy': 'camera=(), microphone=(), geolocation=()' 439 | }; 440 | 441 | // Rate Limiting Configuration 442 | const rateLimitConfig = { 443 | windowMs: 15 * 60 * 1000, // 15 minutes 444 | max: 100, // limit each IP to 100 requests per windowMs 445 | message: 'Too many requests from this IP', 446 | standardHeaders: true, 447 | legacyHeaders: false, 448 | }; 449 | 450 | // Input Validation Function 451 | function sanitizeInput(input, type = 'string') { 452 | const validators = { 453 | string: (val) => val.replace(/[<>\"']/g, ''), 454 | email: (val) => 
/^[^\s@]+@[^\s@]+\.[^\s@]+$/.test(val) ? val : null, 455 | url: (val) => { 456 | try { new URL(val); return val; } 457 | catch { return null; } 458 | } 459 | }; 460 | return validators[type](input); 461 | } 462 | ``` 463 | 464 | ## CONTINUOUS SECURITY MONITORING 465 | 466 | ### Integration Points 467 | 468 | ```yaml 469 | CI_CD_INTEGRATION: 470 |   Pre_Commit: 471 |     - Secret scanning 472 |     - Linting security rules 473 |     - Dependency check 474 | 475 |   Pull_Request: 476 |     - Full security scan 477 |     - SAST analysis 478 |     - License compliance 479 | 480 |   Pre_Deploy: 481 |     - Production config audit 482 |     - Penetration testing 483 |     - Security sign-off 484 | 485 |   Post_Deploy: 486 |     - Runtime monitoring 487 |     - Anomaly detection 488 |     - Incident response 489 | ``` 490 | 491 | ### Security Metrics Dashboard 492 | 493 | ```yaml 494 | SECURITY_KPIs: 495 |   Mean_Time_To_Remediate: 2.3 days 496 |   Vulnerability_Density: 0.8 per KLOC 497 |   Security_Debt_Ratio: 12% 498 |   Patch_Coverage: 94% 499 |   Security_Training_Completion: 87% 500 | ``` 501 | 502 | ## COMMAND PARAMETERS 503 | 504 | ### Usage Examples 505 | 506 | ```bash 507 | # Full security audit 508 | /gustav:audit 509 | 510 | # Dependency vulnerabilities only 511 | /gustav:audit dependencies 512 | 513 | # Code security analysis 514 | /gustav:audit code 515 | 516 | # Configuration audit 517 | /gustav:audit config 518 | 519 | # Compliance check 520 | /gustav:audit --compliance gdpr,soc2 521 | 522 | # Generate fixes 523 | /gustav:audit --auto-fix 524 | 525 | # Export detailed report 526 | /gustav:audit --export pdf --detailed 527 | ``` 528 | 529 | ## SECURITY ENFORCEMENT RULES 530 | 531 | ### Blocking Conditions 532 | 533 | ```yaml 534 | SECURITY_GATES: 535 |   Block_Deployment: 536 |     - Critical vulnerabilities present 537 |     - Secrets detected in code 538 |     - Authentication bypass found 539 |     - Data exposure risk identified 540 | 541 |   Require_Review: 542 |     - High severity issues 543 |     - New dependencies added 544 |     - Security headers changed 545 |     - Authentication modified 546 | 547 |   Auto_Fix: 548 |     - Outdated dependencies 549 |     - Missing headers 550 |     - Weak configurations 551 |     - Common misconfigurations 552 | ``` 553 | 554 | ## MILESTONE INTEGRATION 555 | 556 | ### Security Checkpoints 557 | 558 | ```yaml 559 | MILESTONE_SECURITY: 560 |   Pre_Milestone: 561 |     - Quick security scan 562 |     - Dependency check 563 |     - Secret detection 564 | 565 |   Post_Milestone: 566 |     - Full security audit 567 |     - Penetration testing 568 |     - Compliance validation 569 | 570 |   Sprint_End: 571 |     - Security retrospective 572 |     - Metrics review 573 |     - Training needs assessment 574 | ``` 575 | 576 | ## OUTPUT FILES 577 | 578 | ### Generated Security Artifacts 579 | 580 | ```bash 581 | .tasks/security/ 582 | ├── audit_report.json 583 | ├── vulnerabilities.json 584 | ├── remediation_plan.json 585 | ├── compliance_status.json 586 | ├── security_patches/ 587 | │   ├── middleware.js 588 | │   ├── headers.config.js 589 | │   └── validation.utils.js 590 | ├── executive_summary.pdf 591 | └── technical_report.html 592 | ``` 593 | 594 | ## PERFORMANCE METRICS 595 | 596 | ```yaml 597 | AUDIT_PERFORMANCE: 598 |   Scan_Duration: 15-20 seconds (parallel) 599 |   Files_Per_Second: 50-100 600 |   Dependencies_Checked: 2000+ in <5 seconds 601 |   Token_Usage: ~12K for comprehensive audit 602 |   Cache_Valid: 4 hours 603 | ``` 604 | 605 | ## COMMAND COMPOSITION 606 | 607 | Integrates with: 608 | 609 | - `/gustav:planner` — Initial planning 610 | - `/gustav:executor` — Development 611 | - 
`/gustav:validator` — Validation 612 | - `/gustav:velocity` — Burndown chart 613 | 614 | ## COMPLIANCE AUTOMATION 615 | 616 | ### Automated Evidence Collection 617 | 618 | ```yaml 619 | COMPLIANCE_EVIDENCE: 620 | Access_Controls: 621 | - Authentication logs 622 | - Authorization matrices 623 | - Role definitions 624 | 625 | Data_Protection: 626 | - Encryption verification 627 | - Data classification 628 | - Retention policies 629 | 630 | Audit_Trail: 631 | - Change management 632 | - Security events 633 | - Incident records 634 | ``` 635 | 636 | ## SESSION MANAGEMENT 637 | 638 | - Use `/compact` after generating reports 639 | - Token budget: ~12-15K for full audit 640 | - Cache results for 4 hours 641 | - Real-time monitoring during development 642 | 643 | Remember: Security is not a feature, it's a fundamental requirement. Every line of code is a potential attack vector until proven otherwise. -------------------------------------------------------------------------------- /.claude/commands/gustav/utils/executor_cli.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | CLI Wrapper for Gustav Executor System 4 | 5 | Provides command-line interface for the executor.md command to properly 6 | navigate and update sprint execution state without manual JSON manipulation. 7 | """ 8 | 9 | import argparse 10 | import json 11 | import os 12 | import sys 13 | from datetime import datetime 14 | from typing import Dict, List, Optional, Any 15 | 16 | from dependency_analyzer import find_project_root 17 | from json_updater import JsonUpdater 18 | 19 | 20 | class ExecutorCLI: 21 | def __init__(self, tasks_dir: Optional[str] = None): 22 | if tasks_dir is None: 23 | try: 24 | project_root = find_project_root() 25 | tasks_dir = os.path.join(project_root, ".tasks") 26 | except ValueError as e: 27 | print(f"❌ {e}") 28 | sys.exit(1) 29 | self.tasks_dir = tasks_dir 30 | 31 | # Verify tasks directory exists 32 | if not os.path.exists(self.tasks_dir): 33 | print(f"❌ Gustav tasks directory not found: {self.tasks_dir}") 34 | print(" Make sure you're in a Gustav project with initialized .tasks directory") 35 | sys.exit(1) 36 | 37 | self._load_data() 38 | 39 | def _load_data(self): 40 | """Load all Gustav JSON files""" 41 | self.task_graph = self._load_json("task_graph.json") 42 | self.progress_tracker = self._load_json("progress_tracker.json") 43 | self.guardrail_config = self._load_json("guardrail_config.json") 44 | self.techstack = self._load_json("techstack_research.json") 45 | 46 | def _load_json(self, filename: str) -> Dict: 47 | """Load JSON file from tasks directory""" 48 | try: 49 | path = os.path.join(self.tasks_dir, filename) 50 | with open(path, 'r') as f: 51 | return json.load(f) 52 | except FileNotFoundError: 53 | return {} 54 | 55 | def _save_json(self, filename: str, data: Dict, create_backup: bool = False): 56 | """Save JSON file with optional atomic backup""" 57 | if create_backup: 58 | # Use full backup protection for structural changes 59 | updater = JsonUpdater(self.tasks_dir) 60 | backup_dir = updater.create_backup() 61 | try: 62 | path = os.path.join(self.tasks_dir, filename) 63 | with open(path, 'w') as f: 64 | json.dump(data, f, indent=2) 65 | except Exception as e: 66 | updater.restore_from_backup(backup_dir) 67 | raise e 68 | else: 69 | # Simple write for routine status updates 70 | path = os.path.join(self.tasks_dir, filename) 71 | with open(path, 'w') as f: 72 | json.dump(data, f, indent=2) 73 | 74 | def 
get_current_status(self) -> Dict: 75 | """Get current sprint execution status""" 76 | status = { 77 | "sprint_status": self.progress_tracker.get("status", "unknown"), 78 | "current_milestone": self.progress_tracker.get("current_milestone", {}), 79 | "total_tasks": self.progress_tracker.get("total_tasks", 0), 80 | "completed_tasks": 0, 81 | "validation_required": False, 82 | "blocked_reason": None 83 | } 84 | 85 | # Count completed tasks 86 | for task in self.task_graph.get("tasks", []): 87 | if task.get("status") == "completed": 88 | status["completed_tasks"] += 1 89 | 90 | # Check if validation is required 91 | current_milestone = status["current_milestone"] 92 | if current_milestone: 93 | milestone_id = current_milestone.get("id") 94 | milestone_data = self._get_milestone_by_id(milestone_id) 95 | if milestone_data: 96 | milestone_tasks = milestone_data.get("tasks", []) 97 | completed_milestone_tasks = 0 98 | for task_id in milestone_tasks: 99 | task = self._get_task_by_id(task_id) 100 | if task and task.get("status") == "completed": 101 | completed_milestone_tasks += 1 102 | 103 | if completed_milestone_tasks == len(milestone_tasks): 104 | status["validation_required"] = True 105 | status["blocked_reason"] = f"Milestone {milestone_id} complete - validation required" 106 | 107 | return status 108 | 109 | def get_next_task(self, task_id: Optional[str] = None) -> Dict: 110 | """Get next eligible task or specific task by ID""" 111 | if task_id: 112 | task = self._get_task_by_id(task_id) 113 | if not task: 114 | return {"error": f"Task {task_id} not found"} 115 | 116 | # Check dependencies 117 | dependencies_met = self._check_dependencies(task_id) 118 | if not dependencies_met["satisfied"]: 119 | return { 120 | "error": f"Task {task_id} dependencies not met", 121 | "missing_dependencies": dependencies_met["missing"] 122 | } 123 | 124 | return {"task": task, "eligible": True} 125 | 126 | # Find next eligible task in current milestone 127 | current_milestone = self.progress_tracker.get("current_milestone", {}) 128 | milestone_id = current_milestone.get("id") 129 | 130 | if not milestone_id: 131 | return {"error": "No current milestone set"} 132 | 133 | milestone = self._get_milestone_by_id(milestone_id) 134 | if not milestone: 135 | return {"error": f"Milestone {milestone_id} not found"} 136 | 137 | for task_id in milestone.get("tasks", []): 138 | task = self._get_task_by_id(task_id) 139 | if not task: 140 | continue 141 | 142 | # Skip completed tasks 143 | if task.get("status") == "completed": 144 | continue 145 | 146 | # Check dependencies 147 | dependencies_met = self._check_dependencies(task_id) 148 | if dependencies_met["satisfied"]: 149 | return {"task": task, "eligible": True} 150 | 151 | return {"error": "No eligible tasks found in current milestone"} 152 | 153 | def get_task_details(self, task_id: str) -> Dict: 154 | """Get comprehensive task details including scope boundaries""" 155 | task = self._get_task_by_id(task_id) 156 | if not task: 157 | return {"error": f"Task {task_id} not found"} 158 | 159 | # Add dependency information 160 | dependencies_status = self._check_dependencies(task_id) 161 | 162 | # Add milestone context 163 | milestone = self._get_milestone_for_task(task_id) 164 | 165 | # Add scope boundaries from guardrails 166 | scope_boundaries = self._get_scope_boundaries(task_id) 167 | 168 | return { 169 | "task": task, 170 | "dependencies": dependencies_status, 171 | "milestone": milestone, 172 | "scope_boundaries": scope_boundaries, 173 | "tech_compliance": 
self._check_tech_compliance(task) 174 | } 175 | 176 | def start_task(self, task_id: str) -> Dict: 177 | """Mark task as in-progress and update timestamps""" 178 | task = self._get_task_by_id(task_id) 179 | if not task: 180 | return {"error": f"Task {task_id} not found"} 181 | 182 | # Check if task is eligible 183 | dependencies_status = self._check_dependencies(task_id) 184 | if not dependencies_status["satisfied"]: 185 | return { 186 | "error": f"Cannot start task - dependencies not met", 187 | "missing_dependencies": dependencies_status["missing"] 188 | } 189 | 190 | # Update task status 191 | for task_obj in self.task_graph.get("tasks", []): 192 | if task_obj.get("id") == task_id: 193 | task_obj["status"] = "in_progress" 194 | task_obj["started_at"] = datetime.now().isoformat() 195 | break 196 | 197 | # Save changes atomically 198 | try: 199 | self._save_json("task_graph.json", self.task_graph) 200 | return {"success": True, "task_id": task_id, "status": "in_progress"} 201 | except Exception as e: 202 | return {"error": f"Failed to start task: {e}"} 203 | 204 | def complete_task(self, task_id: str) -> Dict: 205 | """Mark task as complete and update milestone progress""" 206 | task = self._get_task_by_id(task_id) 207 | if not task: 208 | return {"error": f"Task {task_id} not found"} 209 | 210 | # Update task status 211 | for task_obj in self.task_graph.get("tasks", []): 212 | if task_obj.get("id") == task_id: 213 | task_obj["status"] = "completed" 214 | task_obj["completed_at"] = datetime.now().isoformat() 215 | break 216 | 217 | # Update milestone progress in progress_tracker 218 | current_milestone = self.progress_tracker.get("current_milestone", {}) 219 | milestone_id = current_milestone.get("id") 220 | 221 | if milestone_id: 222 | milestone = self._get_milestone_by_id(milestone_id) 223 | if milestone: 224 | milestone_tasks = milestone.get("tasks", []) 225 | completed_count = 0 226 | for mid_task_id in milestone_tasks: 227 | mid_task = self._get_task_by_id(mid_task_id) 228 | if mid_task and mid_task.get("status") == "completed": 229 | completed_count += 1 230 | 231 | # Update progress tracker 232 | current_milestone["tasks_completed"] = completed_count 233 | self.progress_tracker["current_milestone"] = current_milestone 234 | 235 | # Check if milestone is complete 236 | milestone_complete = (completed_count == len(milestone_tasks)) 237 | 238 | # Save changes atomically 239 | try: 240 | self._save_json("task_graph.json", self.task_graph) 241 | self._save_json("progress_tracker.json", self.progress_tracker) 242 | 243 | result = {"success": True, "task_id": task_id, "status": "completed"} 244 | if milestone_complete: 245 | result["milestone_complete"] = True 246 | result["milestone_id"] = milestone_id 247 | result["validation_required"] = True 248 | 249 | return result 250 | except Exception as e: 251 | return {"error": f"Failed to complete task: {e}"} 252 | 253 | def validate_dependencies(self, task_id: str) -> Dict: 254 | """Check if task dependencies are satisfied""" 255 | return self._check_dependencies(task_id) 256 | 257 | def check_scope_compliance(self, task_id: str) -> Dict: 258 | """Validate task against scope boundaries""" 259 | return self._get_scope_boundaries(task_id) 260 | 261 | def get_milestone_status(self, milestone_id: str) -> Dict: 262 | """Get milestone completion status""" 263 | milestone = self._get_milestone_by_id(milestone_id) 264 | if not milestone: 265 | return {"error": f"Milestone {milestone_id} not found"} 266 | 267 | milestone_tasks = milestone.get("tasks", 
[]) 268 | completed_tasks = [] 269 | pending_tasks = [] 270 | 271 | for task_id in milestone_tasks: 272 | task = self._get_task_by_id(task_id) 273 | if task: 274 | if task.get("status") == "completed": 275 | completed_tasks.append(task_id) 276 | else: 277 | pending_tasks.append(task_id) 278 | 279 | return { 280 | "milestone_id": milestone_id, 281 | "milestone_name": milestone.get("name", "Unknown"), 282 | "total_tasks": len(milestone_tasks), 283 | "completed_tasks": len(completed_tasks), 284 | "pending_tasks": len(pending_tasks), 285 | "completion_percentage": (len(completed_tasks) / len(milestone_tasks)) * 100 if milestone_tasks else 0, 286 | "is_complete": len(pending_tasks) == 0, 287 | "completed_task_ids": completed_tasks, 288 | "pending_task_ids": pending_tasks 289 | } 290 | 291 | def _get_task_by_id(self, task_id: str) -> Optional[Dict]: 292 | """Find task by ID in task graph""" 293 | for task in self.task_graph.get("tasks", []): 294 | if task.get("id") == task_id: 295 | return task 296 | return None 297 | 298 | def _get_milestone_by_id(self, milestone_id: str) -> Optional[Dict]: 299 | """Find milestone by ID in task graph""" 300 | for milestone in self.task_graph.get("milestones", []): 301 | if milestone.get("id") == milestone_id: 302 | return milestone 303 | return None 304 | 305 | def _get_milestone_for_task(self, task_id: str) -> Optional[Dict]: 306 | """Find which milestone contains the given task""" 307 | for milestone in self.task_graph.get("milestones", []): 308 | if task_id in milestone.get("tasks", []): 309 | return milestone 310 | return None 311 | 312 | def _check_dependencies(self, task_id: str) -> Dict: 313 | """Check if all task dependencies are satisfied""" 314 | task = self._get_task_by_id(task_id) 315 | if not task: 316 | return {"satisfied": False, "missing": [], "error": "Task not found"} 317 | 318 | dependencies = task.get("dependencies", []) 319 | missing = [] 320 | 321 | for dep_id in dependencies: 322 | dep_task = self._get_task_by_id(dep_id) 323 | if not dep_task or dep_task.get("status") != "completed": 324 | missing.append(dep_id) 325 | 326 | return { 327 | "satisfied": len(missing) == 0, 328 | "missing": missing, 329 | "total_dependencies": len(dependencies) 330 | } 331 | 332 | def _get_scope_boundaries(self, task_id: str) -> Dict: 333 | """Get scope boundaries for task from guardrails""" 334 | task = self._get_task_by_id(task_id) 335 | if not task: 336 | return {"error": "Task not found"} 337 | 338 | scope_boundaries = task.get("scope_boundaries", {}) 339 | guardrails = self.guardrail_config.get("scope_creep_detection", {}) 340 | 341 | return { 342 | "must_implement": scope_boundaries.get("must_implement", []), 343 | "must_not_implement": scope_boundaries.get("must_not_implement", []), 344 | "max_files": scope_boundaries.get("max_file_changes", 10), 345 | "forbidden_patterns": guardrails.get("forbidden_keywords", []), 346 | "allowed_technologies": list(self.techstack.get("stack", {}).keys()) 347 | } 348 | 349 | def _check_tech_compliance(self, task: Dict) -> Dict: 350 | """Check if task complies with approved tech stack""" 351 | allowed_stack = set(self.techstack.get("stack", {}).keys()) 352 | task_technologies = set() 353 | 354 | # Extract technologies from task context 355 | documentation_context = task.get("documentation_context", {}) 356 | if "version_locks" in documentation_context: 357 | task_technologies.update(documentation_context["version_locks"].keys()) 358 | 359 | non_compliant = task_technologies - allowed_stack 360 | 361 | return { 362 | 
"compliant": len(non_compliant) == 0, 363 | "non_compliant_technologies": list(non_compliant), 364 | "allowed_technologies": list(allowed_stack), 365 | "task_technologies": list(task_technologies) 366 | } 367 | 368 | 369 | def main(): 370 | parser = argparse.ArgumentParser(description='Gustav Executor CLI') 371 | parser.add_argument('action', choices=[ 372 | 'get-current-status', 373 | 'get-next-task', 374 | 'get-task-details', 375 | 'start-task', 376 | 'complete-task', 377 | 'validate-dependencies', 378 | 'check-scope-compliance', 379 | 'get-milestone-status' 380 | ]) 381 | parser.add_argument('task_id', nargs='?', help='Task ID for task-specific operations') 382 | parser.add_argument('tasks_dir', nargs='?', help='Path to .tasks directory') 383 | 384 | args = parser.parse_args() 385 | 386 | try: 387 | executor = ExecutorCLI(args.tasks_dir) 388 | 389 | if args.action == 'get-current-status': 390 | result = executor.get_current_status() 391 | 392 | elif args.action == 'get-next-task': 393 | result = executor.get_next_task(args.task_id) 394 | 395 | elif args.action == 'get-task-details': 396 | if not args.task_id: 397 | print("❌ Task ID required for get-task-details") 398 | sys.exit(1) 399 | result = executor.get_task_details(args.task_id) 400 | 401 | elif args.action == 'start-task': 402 | if not args.task_id: 403 | print("❌ Task ID required for start-task") 404 | sys.exit(1) 405 | result = executor.start_task(args.task_id) 406 | 407 | elif args.action == 'complete-task': 408 | if not args.task_id: 409 | print("❌ Task ID required for complete-task") 410 | sys.exit(1) 411 | result = executor.complete_task(args.task_id) 412 | 413 | elif args.action == 'validate-dependencies': 414 | if not args.task_id: 415 | print("❌ Task ID required for validate-dependencies") 416 | sys.exit(1) 417 | result = executor.validate_dependencies(args.task_id) 418 | 419 | elif args.action == 'check-scope-compliance': 420 | if not args.task_id: 421 | print("❌ Task ID required for check-scope-compliance") 422 | sys.exit(1) 423 | result = executor.check_scope_compliance(args.task_id) 424 | 425 | elif args.action == 'get-milestone-status': 426 | if not args.task_id: # Using task_id parameter for milestone_id 427 | print("❌ Milestone ID required for get-milestone-status") 428 | sys.exit(1) 429 | result = executor.get_milestone_status(args.task_id) 430 | 431 | print(json.dumps(result, indent=2)) 432 | 433 | # Exit with error code if result contains an error 434 | if isinstance(result, dict) and "error" in result: 435 | sys.exit(1) 436 | 437 | except Exception as e: 438 | print(f"❌ Executor CLI error: {e}") 439 | sys.exit(1) 440 | 441 | 442 | if __name__ == "__main__": 443 | main() -------------------------------------------------------------------------------- /.claude/commands/gustav/utils/json_updater.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Atomic JSON File Update System for Gustav Enhancement 4 | 5 | Handles consistent updates across all .tasks/*.json files when adding 6 | new features to maintain data integrity and consistency. 
7 | """ 8 | 9 | import json 10 | import os 11 | import shutil 12 | from datetime import datetime 13 | from typing import Dict, List, Optional 14 | from dataclasses import dataclass 15 | 16 | from dependency_analyzer import FeatureAnalysis, find_project_root 17 | from task_inserter import InsertionPlan 18 | 19 | @dataclass 20 | class UpdateSummary: 21 | files_updated: List[str] 22 | backup_location: str 23 | new_task_ids: List[str] 24 | milestones_affected: List[str] 25 | total_tasks_before: int 26 | total_tasks_after: int 27 | 28 | class JsonUpdater: 29 | def __init__(self, tasks_dir: Optional[str] = None): 30 | if tasks_dir is None: 31 | try: 32 | project_root = find_project_root() 33 | tasks_dir = os.path.join(project_root, ".tasks") 34 | except ValueError as e: 35 | print(f"❌ {e}") 36 | import sys 37 | sys.exit(1) 38 | self.tasks_dir = tasks_dir 39 | self.timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") 40 | self.backup_dir = f"{tasks_dir}/backup/{self.timestamp}" 41 | 42 | def create_backup(self) -> str: 43 | """Create backup of all JSON files before modification""" 44 | os.makedirs(self.backup_dir, exist_ok=True) 45 | 46 | json_files = [ 47 | "task_graph.json", 48 | "progress_tracker.json", 49 | "techstack_research.json", 50 | "guardrail_config.json", 51 | "deferred.json", 52 | "prd_digest.json" 53 | ] 54 | 55 | for filename in json_files: 56 | source_path = f"{self.tasks_dir}/{filename}" 57 | if os.path.exists(source_path): 58 | shutil.copy2(source_path, f"{self.backup_dir}/{filename}") 59 | 60 | return self.backup_dir 61 | 62 | def restore_from_backup(self, backup_dir: str) -> bool: 63 | """Restore files from backup in case of failure""" 64 | try: 65 | for filename in os.listdir(backup_dir): 66 | if filename.endswith('.json'): 67 | shutil.copy2(f"{backup_dir}/{filename}", f"{self.tasks_dir}/{filename}") 68 | return True 69 | except Exception as e: 70 | print(f"Error restoring from backup: {e}") 71 | return False 72 | 73 | def apply_enhancement( 74 | self, 75 | analysis: FeatureAnalysis, 76 | plan: InsertionPlan 77 | ) -> UpdateSummary: 78 | """Apply enhancement plan to all JSON files atomically""" 79 | 80 | # Create backup first 81 | backup_location = self.create_backup() 82 | 83 | try: 84 | # Track what we're updating 85 | files_updated = [] 86 | new_task_ids = [task['id'] for task in plan.new_tasks] 87 | 88 | # Load current data 89 | current_data = self._load_all_json_files() 90 | total_tasks_before = len(current_data['task_graph'].get('tasks', [])) 91 | 92 | # Update each file 93 | updated_task_graph = self._update_task_graph( 94 | current_data['task_graph'], plan 95 | ) 96 | if updated_task_graph != current_data['task_graph']: 97 | self._save_json("task_graph.json", updated_task_graph) 98 | files_updated.append("task_graph.json") 99 | 100 | updated_progress = self._update_progress_tracker( 101 | current_data['progress_tracker'], plan, analysis 102 | ) 103 | if updated_progress != current_data['progress_tracker']: 104 | self._save_json("progress_tracker.json", updated_progress) 105 | files_updated.append("progress_tracker.json") 106 | 107 | updated_guardrails = self._update_guardrail_config( 108 | current_data['guardrail_config'], plan, analysis 109 | ) 110 | if updated_guardrails != current_data['guardrail_config']: 111 | self._save_json("guardrail_config.json", updated_guardrails) 112 | files_updated.append("guardrail_config.json") 113 | 114 | updated_prd_digest = self._update_prd_digest( 115 | current_data['prd_digest'], plan, analysis 116 | ) 117 | if updated_prd_digest 
!= current_data['prd_digest']: 118 | self._save_json("prd_digest.json", updated_prd_digest) 119 | files_updated.append("prd_digest.json") 120 | 121 | # Update deferred.json if reactivating a feature 122 | updated_deferred = self._update_deferred_features( 123 | current_data['deferred'], analysis 124 | ) 125 | if updated_deferred != current_data['deferred']: 126 | self._save_json("deferred.json", updated_deferred) 127 | files_updated.append("deferred.json") 128 | 129 | # Update techstack if new technologies were added 130 | updated_techstack = self._update_techstack_research( 131 | current_data['techstack_research'], analysis 132 | ) 133 | if updated_techstack != current_data['techstack_research']: 134 | self._save_json("techstack_research.json", updated_techstack) 135 | files_updated.append("techstack_research.json") 136 | 137 | # Validate all files after update 138 | self._validate_json_consistency() 139 | 140 | total_tasks_after = len(updated_task_graph.get('tasks', [])) 141 | 142 | return UpdateSummary( 143 | files_updated=files_updated, 144 | backup_location=backup_location, 145 | new_task_ids=new_task_ids, 146 | milestones_affected=plan.impact_summary.get('milestones_affected', []), 147 | total_tasks_before=total_tasks_before, 148 | total_tasks_after=total_tasks_after 149 | ) 150 | 151 | except Exception as e: 152 | # Restore from backup on any error 153 | print(f"Error during update, restoring from backup: {e}") 154 | self.restore_from_backup(backup_location) 155 | raise 156 | 157 | def _load_all_json_files(self) -> Dict[str, Dict]: 158 | """Load all JSON files into memory""" 159 | files = { 160 | 'task_graph': 'task_graph.json', 161 | 'progress_tracker': 'progress_tracker.json', 162 | 'techstack_research': 'techstack_research.json', 163 | 'guardrail_config': 'guardrail_config.json', 164 | 'deferred': 'deferred.json', 165 | 'prd_digest': 'prd_digest.json' 166 | } 167 | 168 | data = {} 169 | for key, filename in files.items(): 170 | path = f"{self.tasks_dir}/{filename}" 171 | if os.path.exists(path): 172 | with open(path, 'r') as f: 173 | data[key] = json.load(f) 174 | else: 175 | data[key] = {} 176 | 177 | return data 178 | 179 | def _save_json(self, filename: str, data: Dict) -> None: 180 | """Save JSON data to file with pretty formatting""" 181 | path = f"{self.tasks_dir}/{filename}" 182 | with open(path, 'w') as f: 183 | json.dump(data, f, indent=2) 184 | 185 | def _update_task_graph(self, task_graph: Dict, plan: InsertionPlan) -> Dict: 186 | """Update task_graph.json with new tasks and milestones""" 187 | updated = task_graph.copy() 188 | 189 | # Add new tasks to tasks array 190 | existing_tasks = updated.get('tasks', []) 191 | existing_tasks.extend(plan.new_tasks) 192 | updated['tasks'] = existing_tasks 193 | 194 | # Update milestones 195 | updated['milestones'] = plan.updated_milestones 196 | 197 | # Update scope enforcement 198 | scope_enforcement = updated.get('scope_enforcement', {}) 199 | scope_enforcement['total_tasks'] = len(existing_tasks) 200 | 201 | # Recalculate complexity score (simple heuristic) 202 | complexity_score = scope_enforcement.get('complexity_score', 0) 203 | for task in plan.new_tasks: 204 | enhancement_meta = task.get('enhancement_metadata', {}) 205 | impact = enhancement_meta.get('impact_assessment', 'low') 206 | if impact == 'high': 207 | complexity_score += 3 208 | elif impact == 'medium': 209 | complexity_score += 2 210 | else: 211 | complexity_score += 1 212 | 213 | scope_enforcement['complexity_score'] = complexity_score 214 | 
updated['scope_enforcement'] = scope_enforcement 215 | 216 | return updated 217 | 218 | def _update_progress_tracker( 219 | self, 220 | progress_tracker: Dict, 221 | plan: InsertionPlan, 222 | analysis: FeatureAnalysis 223 | ) -> Dict: 224 | """Update progress_tracker.json with new task counts and status""" 225 | updated = progress_tracker.copy() 226 | 227 | # Update totals 228 | updated['total_tasks'] = updated.get('total_tasks', 0) + len(plan.new_tasks) 229 | 230 | # Update current milestone if tasks were added there 231 | current_milestone = updated.get('current_milestone', {}) 232 | if current_milestone.get('id') == plan.selected_option.target_milestone_id: 233 | current_milestone['tasks_total'] = current_milestone.get('tasks_total', 0) + len(plan.new_tasks) 234 | updated['current_milestone'] = current_milestone 235 | 236 | # Add enhancement tracking 237 | enhancements = updated.get('enhancements', []) 238 | enhancement_record = { 239 | "enhancement_id": f"ENH-{self.timestamp}", 240 | "feature_id": analysis.feature_id, 241 | "description": analysis.description, 242 | "added_date": datetime.now().isoformat()[:10], 243 | "tasks_added": len(plan.new_tasks), 244 | "milestone_target": plan.selected_option.target_milestone_id, 245 | "complexity": analysis.complexity 246 | } 247 | enhancements.append(enhancement_record) 248 | updated['enhancements'] = enhancements 249 | 250 | return updated 251 | 252 | def _update_guardrail_config( 253 | self, 254 | guardrail_config: Dict, 255 | plan: InsertionPlan, 256 | analysis: FeatureAnalysis 257 | ) -> Dict: 258 | """Update guardrail_config.json with new protection rules if needed""" 259 | updated = guardrail_config.copy() 260 | 261 | # Add enhancement-specific protection if high complexity 262 | if analysis.complexity == 'high': 263 | scope_creep_detection = updated.get('scope_creep_detection', {}) 264 | forbidden_keywords = scope_creep_detection.get('forbidden_keywords', []) 265 | 266 | # Add keywords to prevent scope creep in enhancement 267 | enhancement_keywords = [ 268 | f"beyond-{analysis.feature_id.lower()}", 269 | "additional-features", 270 | "extra-functionality" 271 | ] 272 | 273 | for keyword in enhancement_keywords: 274 | if keyword not in forbidden_keywords: 275 | forbidden_keywords.append(keyword) 276 | 277 | scope_creep_detection['forbidden_keywords'] = forbidden_keywords 278 | updated['scope_creep_detection'] = scope_creep_detection 279 | 280 | return updated 281 | 282 | def _update_prd_digest( 283 | self, 284 | prd_digest: Dict, 285 | plan: InsertionPlan, 286 | analysis: FeatureAnalysis 287 | ) -> Dict: 288 | """Update prd_digest.json with enhancement information""" 289 | updated = prd_digest.copy() 290 | 291 | # Add to MVP features if there's room (max 7) 292 | mvp_features = updated.get('mvp_features', []) 293 | if len(mvp_features) < 7: 294 | mvp_feature = { 295 | "id": analysis.feature_id, 296 | "name": analysis.description[:50] + "..." 
if len(analysis.description) > 50 else analysis.description, 297 | "prd_lines": ["ENHANCEMENT"], 298 | "original_text": analysis.description, 299 | "why_mvp": f"Enhancement added post-planning - {analysis.complexity} complexity" 300 | } 301 | mvp_features.append(mvp_feature) 302 | updated['mvp_features'] = mvp_features 303 | 304 | # Update protection metrics 305 | protection_metrics = updated.get('protection_metrics', {}) 306 | protection_metrics['enhancements_added'] = protection_metrics.get('enhancements_added', 0) + 1 307 | protection_metrics['last_enhancement_date'] = datetime.now().isoformat()[:10] 308 | updated['protection_metrics'] = protection_metrics 309 | 310 | return updated 311 | 312 | def _update_deferred_features( 313 | self, 314 | deferred: Dict, 315 | analysis: FeatureAnalysis 316 | ) -> Dict: 317 | """Update deferred.json - remove feature if it's being reactivated""" 318 | updated = deferred.copy() 319 | 320 | # Check if this enhancement matches a deferred feature 321 | deferred_features = updated.get('deferred_features', []) 322 | original_count = len(deferred_features) 323 | 324 | # Remove any deferred feature that matches this description (simple keyword matching) 325 | analysis_keywords = set(analysis.description.lower().split()) 326 | filtered_features = [] 327 | 328 | for feature in deferred_features: 329 | feature_keywords = set(feature.get('name', '').lower().split()) 330 | # If there's significant overlap, consider it the same feature 331 | overlap = len(analysis_keywords.intersection(feature_keywords)) 332 | if overlap < 2: # Require at least 2 matching keywords to consider it the same 333 | filtered_features.append(feature) 334 | 335 | if len(filtered_features) < original_count: 336 | updated['deferred_features'] = filtered_features 337 | updated['total_deferred'] = len(filtered_features) 338 | 339 | return updated 340 | 341 | def _update_techstack_research( 342 | self, 343 | techstack_research: Dict, 344 | analysis: FeatureAnalysis 345 | ) -> Dict: 346 | """Update techstack_research.json if new technologies were added""" 347 | updated = techstack_research.copy() 348 | 349 | if not analysis.new_technologies: 350 | return updated 351 | 352 | # Add placeholder research for new technologies 353 | # In a real implementation, this would trigger actual research 354 | stack = updated.get('stack', {}) 355 | 356 | for tech in analysis.new_technologies: 357 | if tech not in stack: 358 | stack[tech] = { 359 | "name": tech, 360 | "version": "TBD", 361 | "version_verified": { 362 | "source": "Enhancement - needs research", 363 | "checked_date": datetime.now().isoformat()[:10], 364 | "is_latest_stable": False 365 | }, 366 | "documentation": { 367 | "official_url": "TBD", 368 | "last_updated": "TBD" 369 | }, 370 | "decision_sources": [{ 371 | "url": "Enhancement request", 372 | "published": datetime.now().isoformat()[:10], 373 | "relevance": "Required for new feature" 374 | }], 375 | "needs_verification": True 376 | } 377 | 378 | updated['stack'] = stack 379 | 380 | # Update research metadata 381 | research_metadata = updated.get('research_timestamp', '') 382 | updated['last_enhancement_research'] = datetime.now().isoformat()[:10] 383 | 384 | return updated 385 | 386 | def _validate_json_consistency(self) -> None: 387 | """Validate that all JSON files are consistent after updates""" 388 | 389 | # Load all files 390 | data = self._load_all_json_files() 391 | 392 | # Check task_graph consistency 393 | task_graph = data.get('task_graph', {}) 394 | progress_tracker = 
data.get('progress_tracker', {}) 395 | 396 | # Validate task count consistency 397 | tasks_in_graph = len(task_graph.get('tasks', [])) 398 | tasks_in_progress = progress_tracker.get('total_tasks', 0) 399 | 400 | if tasks_in_graph != tasks_in_progress: 401 | raise ValueError(f"Task count mismatch: graph has {tasks_in_graph}, progress has {tasks_in_progress}") 402 | 403 | # Validate milestone consistency 404 | milestones_in_graph = {m.get('id') for m in task_graph.get('milestones', [])} 405 | current_milestone_id = progress_tracker.get('current_milestone', {}).get('id') 406 | 407 | if current_milestone_id and current_milestone_id not in milestones_in_graph: 408 | raise ValueError(f"Current milestone {current_milestone_id} not found in milestone list") 409 | 410 | # Validate all task IDs are unique 411 | task_ids = [task.get('id') for task in task_graph.get('tasks', [])] 412 | if len(task_ids) != len(set(task_ids)): 413 | duplicates = [tid for tid in task_ids if task_ids.count(tid) > 1] 414 | raise ValueError(f"Duplicate task IDs found: {duplicates}") 415 | 416 | print("✅ JSON consistency validation passed") 417 | 418 | def main(): 419 | """Test the JSON updater""" 420 | import sys 421 | from dependency_analyzer import DependencyAnalyzer 422 | from task_inserter import TaskInserter 423 | 424 | if len(sys.argv) < 2: 425 | print("Usage: json_updater.py 'feature description'") 426 | sys.exit(1) 427 | 428 | feature_desc = sys.argv[1] 429 | 430 | # Analyze feature 431 | analyzer = DependencyAnalyzer() 432 | analysis = analyzer.analyze_feature(feature_desc) 433 | 434 | # Find insertion plan 435 | inserter = TaskInserter() 436 | options = inserter.find_insertion_options(analysis) 437 | if not options: 438 | print("No suitable insertion options found") 439 | sys.exit(1) 440 | 441 | plan = inserter.create_insertion_plan(analysis, options[0]) 442 | 443 | # Apply updates (dry run) 444 | updater = JsonUpdater() 445 | print("This would create backup and update the following files:") 446 | print(f"- task_graph.json: Add {len(plan.new_tasks)} tasks") 447 | print(f"- progress_tracker.json: Update totals and milestones") 448 | print(f"- guardrail_config.json: Add protection rules") 449 | print(f"- prd_digest.json: Track enhancement") 450 | print(f"- deferred.json: Remove if reactivating feature") 451 | print(f"- techstack_research.json: Add {len(analysis.new_technologies)} new technologies") 452 | 453 | # Uncomment to actually apply: 454 | # summary = updater.apply_enhancement(analysis, plan) 455 | # print(f"Update Summary: {summary}") 456 | 457 | if __name__ == "__main__": 458 | main() -------------------------------------------------------------------------------- /.claude/commands/gustav/utils/dependency_analyzer.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Dependency Analysis Logic for Gustav Enhancement System 4 | 5 | Analyzes feature dependencies and determines optimal insertion points 6 | in existing sprint milestone structures. 
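
    Illustrative usage sketch (the feature text is a placeholder; this only
    works when run from inside a Gustav project so find_project_root() can
    locate .tasks/task_graph.json):

        analyzer = DependencyAnalyzer()
        # placeholder feature description for demonstration only
        analysis = analyzer.analyze_feature("push-to-talk hotkey for voice capture")
        print(analysis.feature_id, analysis.complexity, analysis.estimated_tasks)
        for dep in analysis.dependencies:
            print(dep.task_id, dep.dependency_type.value, dep.strength)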
7 | """ 8 | 9 | import json 10 | import os 11 | import re 12 | from typing import Dict, List, Tuple, Optional, Set 13 | from dataclasses import dataclass 14 | from enum import Enum 15 | 16 | class DependencyType(Enum): 17 | TECHNICAL = "technical" # Code/API dependencies 18 | LOGICAL = "logical" # User workflow dependencies 19 | DATA = "data" # Data/state dependencies 20 | 21 | @dataclass 22 | class TaskDependency: 23 | task_id: str 24 | dependency_type: DependencyType 25 | reason: str 26 | strength: str # "required" | "preferred" | "optional" 27 | 28 | @dataclass 29 | class FeatureAnalysis: 30 | feature_id: str 31 | description: str 32 | estimated_tasks: int 33 | complexity: str # "low" | "medium" | "high" 34 | new_technologies: List[str] 35 | dependencies: List[TaskDependency] 36 | conflicts: List[str] 37 | 38 | def find_project_root(start_dir: str = None) -> str: 39 | """Find project root by looking for Gustav-specific markers, then .git directory 40 | 41 | Args: 42 | start_dir: Directory to start search from. Defaults to current working directory. 43 | 44 | Returns: 45 | Absolute path to project root directory. 46 | 47 | Raises: 48 | ValueError: If no Gustav project markers are found. 49 | """ 50 | if start_dir is None: 51 | start_dir = os.getcwd() 52 | 53 | current = os.path.abspath(start_dir) 54 | original_start = current 55 | 56 | while current != os.path.dirname(current): # Not at filesystem root 57 | # Priority 1: Look for Gustav-specific markers (.tasks with required files) 58 | tasks_dir = os.path.join(current, '.tasks') 59 | if (os.path.exists(tasks_dir) and 60 | os.path.exists(os.path.join(tasks_dir, 'task_graph.json'))): 61 | return current 62 | 63 | # Priority 2: Look for .git directory (likely a project root) 64 | if os.path.exists(os.path.join(current, '.git')): 65 | # Check if this git project also has Gustav files 66 | tasks_dir = os.path.join(current, '.tasks') 67 | if os.path.exists(tasks_dir): 68 | return current 69 | # If no .tasks, continue searching - this might be a parent repo 70 | 71 | # Priority 3: Look for .claude directory with gustav commands (legacy) 72 | claude_dir = os.path.join(current, '.claude', 'commands', 'gustav') 73 | if os.path.exists(claude_dir): 74 | # Only return if this also looks like a project root 75 | if (os.path.exists(os.path.join(current, '.git')) or 76 | os.path.exists(os.path.join(current, '.tasks'))): 77 | return current 78 | 79 | current = os.path.dirname(current) 80 | 81 | # No Gustav project found - provide helpful error 82 | raise ValueError( 83 | f"No Gustav project found starting from '{original_start}'. " 84 | f"Looking for directory containing '.tasks/task_graph.json'. " 85 | f"Make sure you're running from within a Gustav project directory." 
86 | ) 87 | 88 | class DependencyAnalyzer: 89 | def __init__(self, tasks_dir: str = None): 90 | if tasks_dir is None: 91 | project_root = find_project_root() 92 | tasks_dir = os.path.join(project_root, ".tasks") 93 | self.tasks_dir = tasks_dir 94 | self.task_graph = self._load_json("task_graph.json") 95 | self.progress_tracker = self._load_json("progress_tracker.json") 96 | self.techstack = self._load_json("techstack_research.json") 97 | self.deferred = self._load_json("deferred.json") 98 | 99 | def _load_json(self, filename: str) -> Dict: 100 | """Load JSON file from tasks directory""" 101 | try: 102 | with open(f"{self.tasks_dir}/{filename}", 'r') as f: 103 | return json.load(f) 104 | except FileNotFoundError: 105 | return {} 106 | 107 | def analyze_feature(self, feature_description: str) -> FeatureAnalysis: 108 | """ 109 | Analyze a new feature description and determine its characteristics 110 | and dependencies on existing tasks. 111 | """ 112 | # Extract technical keywords from description 113 | tech_keywords = self._extract_technical_keywords(feature_description) 114 | 115 | # Check against existing techstack 116 | new_technologies = self._identify_new_technologies(tech_keywords) 117 | 118 | # Analyze dependencies on existing tasks 119 | dependencies = self._analyze_dependencies(feature_description, tech_keywords) 120 | 121 | # Detect potential conflicts 122 | conflicts = self._detect_conflicts(feature_description, dependencies) 123 | 124 | # Estimate complexity and task count 125 | complexity, task_count = self._estimate_complexity( 126 | feature_description, new_technologies, dependencies 127 | ) 128 | 129 | return FeatureAnalysis( 130 | feature_id=f"F-ENH-{len(self.task_graph.get('tasks', []))+1:03d}", 131 | description=feature_description, 132 | estimated_tasks=task_count, 133 | complexity=complexity, 134 | new_technologies=new_technologies, 135 | dependencies=dependencies, 136 | conflicts=conflicts 137 | ) 138 | 139 | def _extract_technical_keywords(self, description: str) -> Set[str]: 140 | """Extract technical keywords from feature description""" 141 | # Common technical terms that might indicate dependencies 142 | tech_patterns = { 143 | r'\bapi\b': 'api', 144 | r'\bdatabase\b': 'database', 145 | r'\bauth\w*': 'authentication', 146 | r'\bui\b|\buser interface\b': 'ui', 147 | r'\baudio\b|\bvoice\b|\bsound\b': 'audio', 148 | r'\bhotkey\b|\bshortcut\b|\bkeyboard\b': 'hotkey', 149 | r'\btray\b|\bsystem tray\b': 'system_tray', 150 | r'\bconfig\w*|\bsettings\b': 'configuration', 151 | r'\bfile\b|\bstorage\b': 'file_system', 152 | r'\bnetwork\b|\bhttp\b|\brequest\b': 'networking', 153 | r'\bai\b|\bllm\b|\bmachine learning\b': 'ai', 154 | r'\bparse\w*|\bprocess\w*': 'processing', 155 | } 156 | 157 | keywords = set() 158 | description_lower = description.lower() 159 | 160 | for pattern, keyword in tech_patterns.items(): 161 | if re.search(pattern, description_lower): 162 | keywords.add(keyword) 163 | 164 | return keywords 165 | 166 | def _identify_new_technologies(self, tech_keywords: Set[str]) -> List[str]: 167 | """Identify if feature requires technologies not in current stack""" 168 | current_stack = set() 169 | 170 | # Extract technologies from existing stack 171 | stack_info = self.techstack.get('stack', {}) 172 | if isinstance(stack_info, dict): 173 | for tech in stack_info.values(): 174 | if isinstance(tech, dict): 175 | current_stack.add(tech.get('name', '').lower()) 176 | elif isinstance(tech, str): 177 | current_stack.add(tech.lower()) 178 | 179 | # Check which keywords 
represent new technologies 180 | new_techs = [] 181 | tech_mappings = { 182 | 'database': ['sqlite', 'postgres', 'mysql'], 183 | 'ui': ['svelte', 'react', 'vue'], 184 | 'networking': ['axios', 'fetch', 'requests'], 185 | 'ai': ['langchain', 'openai', 'anthropic'], 186 | } 187 | 188 | for keyword in tech_keywords: 189 | if keyword in tech_mappings: 190 | # Check if any of the related technologies are in current stack 191 | related_techs = tech_mappings[keyword] 192 | if not any(tech in str(current_stack).lower() for tech in related_techs): 193 | new_techs.append(keyword) 194 | elif keyword not in str(current_stack).lower(): 195 | new_techs.append(keyword) 196 | 197 | return new_techs 198 | 199 | def _analyze_dependencies(self, description: str, tech_keywords: Set[str]) -> List[TaskDependency]: 200 | """Analyze dependencies on existing tasks based on feature description""" 201 | dependencies = [] 202 | existing_tasks = self.task_graph.get('tasks', []) 203 | 204 | # Dependency rules based on technical requirements 205 | dependency_rules = { 206 | 'api': self._find_api_dependencies, 207 | 'audio': self._find_audio_dependencies, 208 | 'hotkey': self._find_hotkey_dependencies, 209 | 'system_tray': self._find_tray_dependencies, 210 | 'authentication': self._find_auth_dependencies, 211 | 'ui': self._find_ui_dependencies, 212 | 'ai': self._find_ai_dependencies, 213 | 'configuration': self._find_config_dependencies, 214 | } 215 | 216 | for keyword in tech_keywords: 217 | if keyword in dependency_rules: 218 | rule_deps = dependency_rules[keyword](existing_tasks, description) 219 | dependencies.extend(rule_deps) 220 | 221 | # Remove duplicates while preserving order 222 | seen = set() 223 | unique_deps = [] 224 | for dep in dependencies: 225 | if dep.task_id not in seen: 226 | seen.add(dep.task_id) 227 | unique_deps.append(dep) 228 | 229 | return unique_deps 230 | 231 | def _find_api_dependencies(self, existing_tasks: List[Dict], description: str) -> List[TaskDependency]: 232 | """Find API-related dependencies""" 233 | deps = [] 234 | for task in existing_tasks: 235 | task_id = task.get('id', '') 236 | task_title = task.get('title', '').lower() 237 | 238 | # Look for API authentication tasks 239 | if 'api' in task_title and 'auth' in task_title: 240 | deps.append(TaskDependency( 241 | task_id=task_id, 242 | dependency_type=DependencyType.TECHNICAL, 243 | reason="API authentication required for API operations", 244 | strength="required" 245 | )) 246 | 247 | # Look for API client tasks 248 | elif 'api' in task_title and 'client' in task_title: 249 | deps.append(TaskDependency( 250 | task_id=task_id, 251 | dependency_type=DependencyType.TECHNICAL, 252 | reason="API client infrastructure needed", 253 | strength="preferred" 254 | )) 255 | 256 | return deps 257 | 258 | def _find_audio_dependencies(self, existing_tasks: List[Dict], description: str) -> List[TaskDependency]: 259 | """Find audio/voice-related dependencies""" 260 | deps = [] 261 | for task in existing_tasks: 262 | task_id = task.get('id', '') 263 | task_title = task.get('title', '').lower() 264 | 265 | # Audio capture dependencies 266 | if 'whisper' in task_title or 'audio' in task_title: 267 | deps.append(TaskDependency( 268 | task_id=task_id, 269 | dependency_type=DependencyType.TECHNICAL, 270 | reason="Audio processing infrastructure required", 271 | strength="required" 272 | )) 273 | 274 | # Voice pipeline dependencies 275 | elif 'voice' in task_title and 'pipeline' in task_title: 276 | deps.append(TaskDependency( 277 | task_id=task_id, 278 
278 |                     dependency_type=DependencyType.LOGICAL,
279 |                     reason="Voice processing pipeline needed for audio features",
280 |                     strength="required"
281 |                 ))
282 |
283 |         return deps
284 |
285 |     def _find_hotkey_dependencies(self, existing_tasks: List[Dict], description: str) -> List[TaskDependency]:
286 |         """Find hotkey/shortcut-related dependencies"""
287 |         deps = []
288 |         for task in existing_tasks:
289 |             task_id = task.get('id', '')
290 |             task_title = task.get('title', '').lower()
291 |
292 |             if 'hotkey' in task_title or 'shortcut' in task_title:
293 |                 deps.append(TaskDependency(
294 |                     task_id=task_id,
295 |                     dependency_type=DependencyType.TECHNICAL,
296 |                     reason="Global hotkey system required for shortcuts",
297 |                     strength="required"
298 |                 ))
299 |
300 |         return deps
301 |
302 |     def _find_tray_dependencies(self, existing_tasks: List[Dict], description: str) -> List[TaskDependency]:
303 |         """Find system tray-related dependencies"""
304 |         deps = []
305 |         for task in existing_tasks:
306 |             task_id = task.get('id', '')
307 |             task_title = task.get('title', '').lower()
308 |
309 |             if 'tray' in task_title:
310 |                 deps.append(TaskDependency(
311 |                     task_id=task_id,
312 |                     dependency_type=DependencyType.TECHNICAL,
313 |                     reason="System tray infrastructure required",
314 |                     strength="required"
315 |                 ))
316 |
317 |         return deps
318 |
319 |     def _find_auth_dependencies(self, existing_tasks: List[Dict], description: str) -> List[TaskDependency]:
320 |         """Find authentication-related dependencies"""
321 |         deps = []
322 |         for task in existing_tasks:
323 |             task_id = task.get('id', '')
324 |             task_title = task.get('title', '').lower()
325 |
326 |             if 'auth' in task_title:
327 |                 deps.append(TaskDependency(
328 |                     task_id=task_id,
329 |                     dependency_type=DependencyType.TECHNICAL,
330 |                     reason="Authentication system required",
331 |                     strength="required"
332 |                 ))
333 |
334 |         return deps
335 |
336 |     def _find_ui_dependencies(self, existing_tasks: List[Dict], description: str) -> List[TaskDependency]:
337 |         """Find UI-related dependencies"""
338 |         deps = []
339 |         for task in existing_tasks:
340 |             task_id = task.get('id', '')
341 |             task_title = task.get('title', '').lower()
342 |
343 |             # Basic app setup usually required for UI features
344 |             if 'setup' in task_title or 'initialize' in task_title:
345 |                 deps.append(TaskDependency(
346 |                     task_id=task_id,
347 |                     dependency_type=DependencyType.TECHNICAL,
348 |                     reason="Application setup required for UI components",
349 |                     strength="required"
350 |                 ))
351 |
352 |         return deps
353 |
354 |     def _find_ai_dependencies(self, existing_tasks: List[Dict], description: str) -> List[TaskDependency]:
355 |         """Find AI/LLM-related dependencies"""
356 |         deps = []
357 |         for task in existing_tasks:
358 |             task_id = task.get('id', '')
359 |             task_title = task.get('title', '').lower()
360 |
361 |             if 'langchain' in task_title or 'llm' in task_title or 'ai' in task_title:
362 |                 deps.append(TaskDependency(
363 |                     task_id=task_id,
364 |                     dependency_type=DependencyType.TECHNICAL,
365 |                     reason="AI/LLM infrastructure required",
366 |                     strength="required"
367 |                 ))
368 |
369 |         return deps
370 |
371 |     def _find_config_dependencies(self, existing_tasks: List[Dict], description: str) -> List[TaskDependency]:
372 |         """Find configuration-related dependencies"""
373 |         deps = []
374 |         for task in existing_tasks:
375 |             task_id = task.get('id', '')
376 |             task_title = task.get('title', '').lower()
377 |
378 |             # Usually configuration depends on basic setup
379 |             if 'setup' in task_title or 'initialize' in task_title:
380 |                 deps.append(TaskDependency(
381 |                     task_id=task_id,
382 |                     dependency_type=DependencyType.LOGICAL,
383 |                     reason="Basic setup required before configuration",
384 |                     strength="preferred"
385 |                 ))
386 |
387 |         return deps
388 |
389 |     def _detect_conflicts(self, description: str, dependencies: List[TaskDependency]) -> List[str]:
390 |         """Detect potential conflicts with existing features"""
391 |         conflicts = []
392 |
393 |         # Check against deferred features for potential duplication
394 |         deferred_features = self.deferred.get('deferred_features', [])
395 |         description_lower = description.lower()
396 |
397 |         for deferred in deferred_features:
398 |             deferred_name = deferred.get('name', '').lower()
399 |             # Simple keyword matching for conflict detection
400 |             if any(word in description_lower for word in deferred_name.split()):
401 |                 conflicts.append(f"Similar to deferred feature: {deferred.get('name')}")
402 |
403 |         return conflicts
404 |
405 |     def _estimate_complexity(
406 |         self,
407 |         description: str,
408 |         new_technologies: List[str],
409 |         dependencies: List[TaskDependency]
410 |     ) -> Tuple[str, int]:
411 |         """Estimate feature complexity and number of tasks needed"""
412 |
413 |         # Base complexity factors
414 |         complexity_score = 0
415 |
416 |         # Description length and complexity keywords
417 |         word_count = len(description.split())
418 |         if word_count > 20:
419 |             complexity_score += 2
420 |         elif word_count > 10:
421 |             complexity_score += 1
422 |
423 |         # Complexity keywords
424 |         complex_keywords = [
425 |             'integration', 'synchronization', 'multiple', 'advanced',
426 |             'complex', 'algorithm', 'optimization', 'real-time'
427 |         ]
428 |         description_lower = description.lower()
429 |         for keyword in complex_keywords:
430 |             if keyword in description_lower:
431 |                 complexity_score += 1
432 |
433 |         # New technologies add complexity
434 |         complexity_score += len(new_technologies)
435 |
436 |         # Dependencies add complexity
437 |         required_deps = [d for d in dependencies if d.strength == "required"]
438 |         complexity_score += len(required_deps) // 2
439 |
440 |         # Determine complexity level and task count
441 |         if complexity_score <= 2:
442 |             return "low", 1
443 |         elif complexity_score <= 5:
444 |             return "medium", 2
445 |         else:
446 |             return "high", 3
447 |
448 | def main():
449 |     """Test the dependency analyzer"""
450 |     import sys
451 |
452 |     if len(sys.argv) < 2:
453 |         print("Usage: dependency_analyzer.py 'feature description'")
454 |         sys.exit(1)
455 |
456 |     feature_desc = sys.argv[1]
457 |     analyzer = DependencyAnalyzer()
458 |     analysis = analyzer.analyze_feature(feature_desc)
459 |
460 |     print(f"Feature Analysis for: {analysis.description}")
461 |     print(f"Complexity: {analysis.complexity}")
462 |     print(f"Estimated Tasks: {analysis.estimated_tasks}")
463 |     print(f"New Technologies: {analysis.new_technologies}")
464 |     print("Dependencies:")
465 |     for dep in analysis.dependencies:
466 |         print(f" - {dep.task_id}: {dep.reason} ({dep.strength})")
467 |     print(f"Potential Conflicts: {analysis.conflicts}")
468 |
469 | if __name__ == "__main__":
470 |     main()
--------------------------------------------------------------------------------
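
For reference, a minimal sketch of how the analyzer above might be driven from another script (illustrative only: it assumes a project with a populated `.tasks/` directory, and the feature description is made up):

```python
# Illustrative usage of DependencyAnalyzer -- the same calls exercised by main() above.
from dependency_analyzer import DependencyAnalyzer

analyzer = DependencyAnalyzer()  # expects the project's .tasks/ JSON files to be present
analysis = analyzer.analyze_feature("Add real-time Whisper transcription")

# _estimate_complexity() maps the internal score to a task budget:
#   score <= 2 -> ("low", 1), score <= 5 -> ("medium", 2), otherwise ("high", 3)
print(analysis.complexity, analysis.estimated_tasks)
for dep in analysis.dependencies:
    print(dep.task_id, dep.dependency_type, dep.strength)
```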
7 | """ 8 | 9 | import json 10 | import os 11 | import copy 12 | from typing import Dict, List, Tuple, Optional 13 | from dataclasses import dataclass 14 | from enum import Enum 15 | 16 | from dependency_analyzer import FeatureAnalysis, TaskDependency, find_project_root 17 | 18 | class InsertionStrategy(Enum): 19 | CURRENT_MILESTONE = "current_milestone" 20 | FUTURE_MILESTONE = "future_milestone" 21 | NEW_MILESTONE = "new_milestone" 22 | SPLIT_MILESTONE = "split_milestone" 23 | 24 | @dataclass 25 | class InsertionOption: 26 | strategy: InsertionStrategy 27 | target_milestone_id: str 28 | position: int 29 | reasoning: str 30 | impact_score: int # Lower is better 31 | capacity_after: int 32 | dependencies_satisfied: bool 33 | 34 | @dataclass 35 | class InsertionPlan: 36 | selected_option: InsertionOption 37 | new_tasks: List[Dict] 38 | updated_milestones: List[Dict] 39 | impact_summary: Dict 40 | 41 | class TaskInserter: 42 | def __init__(self, tasks_dir: str = None): 43 | if tasks_dir is None: 44 | try: 45 | project_root = find_project_root() 46 | tasks_dir = os.path.join(project_root, ".tasks") 47 | except ValueError as e: 48 | print(f"❌ {e}") 49 | import sys 50 | sys.exit(1) 51 | self.tasks_dir = tasks_dir 52 | self.task_graph = self._load_json("task_graph.json") 53 | self.progress_tracker = self._load_json("progress_tracker.json") 54 | 55 | def _load_json(self, filename: str) -> Dict: 56 | """Load JSON file from tasks directory""" 57 | with open(f"{self.tasks_dir}/{filename}", 'r') as f: 58 | return json.load(f) 59 | 60 | def find_insertion_options(self, analysis: FeatureAnalysis) -> List[InsertionOption]: 61 | """ 62 | Generate all possible insertion options for a feature, 63 | ranked by impact and feasibility. 64 | """ 65 | options = [] 66 | milestones = self.task_graph.get('milestones', []) 67 | strategy = self.task_graph.get('milestone_strategy', {}) 68 | max_tasks = strategy.get('max_tasks_per_milestone', 5) 69 | 70 | current_milestone_id = self.progress_tracker.get('current_milestone', {}).get('id') 71 | 72 | # Option 1: Insert in current milestone (if capacity and dependencies allow) 73 | if current_milestone_id: 74 | current_option = self._evaluate_current_milestone_insertion( 75 | analysis, current_milestone_id, max_tasks 76 | ) 77 | if current_option: 78 | options.append(current_option) 79 | 80 | # Option 2: Insert in future milestones 81 | future_options = self._evaluate_future_milestone_insertions( 82 | analysis, milestones, max_tasks 83 | ) 84 | options.extend(future_options) 85 | 86 | # Option 3: Create new milestone 87 | new_milestone_option = self._evaluate_new_milestone_creation( 88 | analysis, milestones 89 | ) 90 | if new_milestone_option: 91 | options.append(new_milestone_option) 92 | 93 | # Option 4: Split existing milestone 94 | split_options = self._evaluate_milestone_splits( 95 | analysis, milestones, max_tasks 96 | ) 97 | options.extend(split_options) 98 | 99 | # Sort by impact score (lower is better) 100 | options.sort(key=lambda x: (x.impact_score, not x.dependencies_satisfied)) 101 | 102 | return options 103 | 104 | def _evaluate_current_milestone_insertion( 105 | self, 106 | analysis: FeatureAnalysis, 107 | milestone_id: str, 108 | max_tasks: int 109 | ) -> Optional[InsertionOption]: 110 | """Evaluate inserting in current milestone""" 111 | 112 | milestone = self._find_milestone(milestone_id) 113 | if not milestone: 114 | return None 115 | 116 | current_tasks = len(milestone.get('tasks', [])) 117 | capacity = max_tasks - current_tasks 118 | 119 | # Check if 
120 |         if capacity < analysis.estimated_tasks:
121 |             return None
122 |
123 |         # Check if dependencies are satisfied
124 |         deps_satisfied = self._check_dependencies_satisfied_in_milestone(
125 |             analysis.dependencies, milestone_id, include_previous=True
126 |         )
127 |
128 |         # Calculate impact score
129 |         impact_score = 1 if deps_satisfied else 10  # Lower is better
130 |
131 |         # Find insertion position (before validation task)
132 |         position = self._find_insertion_position_in_milestone(milestone)
133 |
134 |         return InsertionOption(
135 |             strategy=InsertionStrategy.CURRENT_MILESTONE,
136 |             target_milestone_id=milestone_id,
137 |             position=position,
138 |             reasoning=f"Insert in current milestone {milestone_id} - has capacity for {capacity} more tasks",
139 |             impact_score=impact_score,
140 |             capacity_after=capacity - analysis.estimated_tasks,
141 |             dependencies_satisfied=deps_satisfied
142 |         )
143 |
144 |     def _evaluate_future_milestone_insertions(
145 |         self,
146 |         analysis: FeatureAnalysis,
147 |         milestones: List[Dict],
148 |         max_tasks: int
149 |     ) -> List[InsertionOption]:
150 |         """Evaluate inserting in future milestones"""
151 |         options = []
152 |         current_milestone_id = self.progress_tracker.get('current_milestone', {}).get('id')
153 |
154 |         # Find current milestone index
155 |         current_index = -1
156 |         for i, milestone in enumerate(milestones):
157 |             if milestone.get('id') == current_milestone_id:
158 |                 current_index = i
159 |                 break
160 |
161 |         # Evaluate future milestones
162 |         for i in range(current_index + 1, len(milestones)):
163 |             milestone = milestones[i]
164 |             milestone_id = milestone.get('id')
165 |             current_tasks = len(milestone.get('tasks', []))
166 |             capacity = max_tasks - current_tasks
167 |
168 |             if capacity >= analysis.estimated_tasks:
169 |                 # Check dependencies considering all previous milestones
170 |                 deps_satisfied = self._check_dependencies_satisfied_before_milestone(
171 |                     analysis.dependencies, i, milestones
172 |                 )
173 |
174 |                 # Calculate impact score based on distance and capacity
175 |                 distance_penalty = (i - current_index) * 2
176 |                 capacity_bonus = capacity - analysis.estimated_tasks
177 |                 impact_score = distance_penalty - capacity_bonus + (0 if deps_satisfied else 10)
178 |
179 |                 position = self._find_insertion_position_in_milestone(milestone)
180 |
181 |                 options.append(InsertionOption(
182 |                     strategy=InsertionStrategy.FUTURE_MILESTONE,
183 |                     target_milestone_id=milestone_id,
184 |                     position=position,
185 |                     reasoning=f"Insert in future milestone {milestone_id} - dependencies satisfied, good capacity",
186 |                     impact_score=impact_score,
187 |                     capacity_after=capacity - analysis.estimated_tasks,
188 |                     dependencies_satisfied=deps_satisfied
189 |                 ))
190 |
191 |         return options
192 |
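    # Worked example of the impact scoring above (illustrative numbers only):
    # a 2-task feature placed two milestones ahead gives distance_penalty = 2 * 2 = 4;
    # a target milestone with capacity for 3 tasks gives capacity_bonus = 3 - 2 = 1,
    # so the option scores 4 - 1 + 0 = 3 when its required dependencies are met,
    # and 4 - 1 + 10 = 13 when they are not, pushing it far down the sorted list.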
193 |     def _evaluate_new_milestone_creation(
194 |         self,
195 |         analysis: FeatureAnalysis,
196 |         milestones: List[Dict]
197 |     ) -> Optional[InsertionOption]:
198 |         """Evaluate creating a new milestone for this feature"""
199 |
200 |         # Only consider new milestone if feature has 3+ tasks or is high complexity
201 |         if analysis.estimated_tasks < 3 and analysis.complexity != "high":
202 |             return None
203 |
204 |         # Find optimal position for new milestone
205 |         optimal_position = self._find_optimal_new_milestone_position(
206 |             analysis.dependencies, milestones
207 |         )
208 |
209 |         if optimal_position is None:
210 |             return None
211 |
212 |         # Calculate impact score
213 |         impact_score = 15 + (len(milestones) * 2)  # Creating milestone has higher impact
214 |
215 |         # Check if dependencies would be satisfied
216 |         deps_satisfied = self._check_dependencies_satisfied_before_position(
217 |             analysis.dependencies, optimal_position, milestones
218 |         )
219 |
220 |         new_milestone_id = f"M{len(milestones) + 1}"
221 |
222 |         return InsertionOption(
223 |             strategy=InsertionStrategy.NEW_MILESTONE,
224 |             target_milestone_id=new_milestone_id,
225 |             position=optimal_position,
226 |             reasoning=f"Create new milestone {new_milestone_id} - feature complex enough to warrant own milestone",
227 |             impact_score=impact_score,
228 |             capacity_after=5 - analysis.estimated_tasks,  # Assume 5-task milestone
229 |             dependencies_satisfied=deps_satisfied
230 |         )
231 |
232 |     def _evaluate_milestone_splits(
233 |         self,
234 |         analysis: FeatureAnalysis,
235 |         milestones: List[Dict],
236 |         max_tasks: int
237 |     ) -> List[InsertionOption]:
238 |         """Evaluate splitting existing milestones to make room"""
239 |         options = []
240 |
241 |         # Only consider splits for medium/high complexity features
242 |         if analysis.complexity == "low":
243 |             return options
244 |
245 |         for i, milestone in enumerate(milestones):
246 |             current_tasks = len(milestone.get('tasks', []))
247 |
248 |             # Only split if milestone is near capacity
249 |             if current_tasks >= max_tasks - 1:
250 |                 split_option = self._evaluate_milestone_split(
251 |                     analysis, milestone, i, milestones
252 |                 )
253 |                 if split_option:
254 |                     options.append(split_option)
255 |
256 |         return options
257 |
258 |     def _evaluate_milestone_split(
259 |         self,
260 |         analysis: FeatureAnalysis,
261 |         milestone: Dict,
262 |         milestone_index: int,
263 |         milestones: List[Dict]
264 |     ) -> Optional[InsertionOption]:
265 |         """Evaluate splitting a specific milestone"""
266 |
267 |         milestone_id = milestone.get('id')
268 |         tasks = milestone.get('tasks', [])
269 |
270 |         # Don't split milestones with too few tasks
271 |         if len(tasks) < 4:
272 |             return None
273 |
274 |         # Check dependencies
275 |         deps_satisfied = self._check_dependencies_satisfied_before_position(
276 |             analysis.dependencies, milestone_index, milestones
277 |         )
278 |
279 |         # High impact score due to structural changes
280 |         impact_score = 20 + (milestone_index * 2)
281 |
282 |         return InsertionOption(
283 |             strategy=InsertionStrategy.SPLIT_MILESTONE,
284 |             target_milestone_id=f"{milestone_id}-SPLIT",
285 |             position=milestone_index,
286 |             reasoning=f"Split milestone {milestone_id} to make room for feature",
287 |             impact_score=impact_score,
288 |             capacity_after=2,  # Assume split creates room
289 |             dependencies_satisfied=deps_satisfied
290 |         )
291 |
292 |     def _find_milestone(self, milestone_id: str) -> Optional[Dict]:
293 |         """Find milestone by ID"""
294 |         for milestone in self.task_graph.get('milestones', []):
295 |             if milestone.get('id') == milestone_id:
296 |                 return milestone
297 |         return None
298 |
299 |     def _find_insertion_position_in_milestone(self, milestone: Dict) -> int:
300 |         """Find optimal position within milestone (usually before validation)"""
301 |         tasks = milestone.get('tasks', [])
302 |
303 |         # Insert before validation tasks (which start with T-VAL-)
304 |         for i, task_id in enumerate(tasks):
305 |             if task_id.startswith('T-VAL-'):
306 |                 return i
307 |
308 |         # If no validation task, insert at end
309 |         return len(tasks)
310 |
311 |     def _check_dependencies_satisfied_in_milestone(
312 |         self,
313 |         dependencies: List[TaskDependency],
314 |         milestone_id: str,
315 |         include_previous: bool = False
316 |     ) -> bool:
317 |         """Check if all required dependencies would be satisfied in/before milestone"""
318 |
319 |         if not dependencies:
320 |             return True
321 |
322 |         # Get all task IDs that would be completed before this insertion point
323 |         completed_tasks = set()
324 |
325 |         if include_previous:
326 |             # Add all tasks from previous milestones
327 |             current_found = False
328 |             for milestone in self.task_graph.get('milestones', []):
329 |                 if milestone.get('id') == milestone_id:
330 |                     current_found = True
331 |                     break
332 |                 completed_tasks.update(milestone.get('tasks', []))
333 |
334 |         # Add tasks from current milestone (before insertion point)
335 |         milestone = self._find_milestone(milestone_id)
336 |         if milestone:
337 |             position = self._find_insertion_position_in_milestone(milestone)
338 |             milestone_tasks = milestone.get('tasks', [])
339 |             completed_tasks.update(milestone_tasks[:position])
340 |
341 |         # Check if all required dependencies are satisfied
342 |         for dep in dependencies:
343 |             if dep.strength == "required" and dep.task_id not in completed_tasks:
344 |                 return False
345 |
346 |         return True
347 |
348 |     def _check_dependencies_satisfied_before_milestone(
349 |         self,
350 |         dependencies: List[TaskDependency],
351 |         milestone_index: int,
352 |         milestones: List[Dict]
353 |     ) -> bool:
354 |         """Check if dependencies are satisfied before given milestone index"""
355 |
356 |         if not dependencies:
357 |             return True
358 |
359 |         # Collect all tasks from milestones before the target
360 |         completed_tasks = set()
361 |         for i in range(milestone_index):
362 |             if i < len(milestones):
363 |                 completed_tasks.update(milestones[i].get('tasks', []))
364 |
365 |         # Check required dependencies
366 |         for dep in dependencies:
367 |             if dep.strength == "required" and dep.task_id not in completed_tasks:
368 |                 return False
369 |
370 |         return True
371 |
372 |     def _check_dependencies_satisfied_before_position(
373 |         self,
374 |         dependencies: List[TaskDependency],
375 |         position: int,
376 |         milestones: List[Dict]
377 |     ) -> bool:
378 |         """Check if dependencies satisfied before given milestone position"""
379 |         return self._check_dependencies_satisfied_before_milestone(
380 |             dependencies, position, milestones
381 |         )
382 |
383 |     def _find_optimal_new_milestone_position(
384 |         self,
385 |         dependencies: List[TaskDependency],
386 |         milestones: List[Dict]
387 |     ) -> Optional[int]:
388 |         """Find optimal position for new milestone"""
389 |
390 |         if not dependencies:
391 |             # If no dependencies, can insert anywhere after current
392 |             current_milestone_id = self.progress_tracker.get('current_milestone', {}).get('id')
393 |             for i, milestone in enumerate(milestones):
394 |                 if milestone.get('id') == current_milestone_id:
395 |                     return i + 1
396 |             return len(milestones)  # End if current not found
397 |
398 |         # Find minimum position where all required deps are satisfied
399 |         min_position = 0
400 |         for dep in dependencies:
401 |             if dep.strength == "required":
402 |                 # Find which milestone contains this dependency
403 |                 for i, milestone in enumerate(milestones):
404 |                     if dep.task_id in milestone.get('tasks', []):
405 |                         min_position = max(min_position, i + 1)
406 |                         break
407 |
408 |         return min_position if min_position <= len(milestones) else None
409 |
410 |     def create_insertion_plan(
411 |         self,
412 |         analysis: FeatureAnalysis,
413 |         selected_option: InsertionOption
414 |     ) -> InsertionPlan:
415 |         """Create detailed insertion plan based on selected option"""
416 |
417 |         # Generate new tasks based on analysis
418 |         new_tasks = self._generate_tasks_from_analysis(analysis, selected_option)
419 |
420 |         # Create updated milestone structure
421 |         updated_milestones = self._create_updated_milestones(
422 |             selected_option, new_tasks, analysis
423 |         )
424 |
425 |         # Generate impact summary
426 |         impact_summary = {
427 |             "strategy": selected_option.strategy.value,
428 |             "tasks_added": len(new_tasks),
429 |             "milestones_affected": self._get_affected_milestones(selected_option),
430 |             "capacity_impact": selected_option.capacity_after,
431 |             "dependencies_satisfied": selected_option.dependencies_satisfied,
432 |             "structural_changes": selected_option.strategy in [
433 |                 InsertionStrategy.NEW_MILESTONE,
434 |                 InsertionStrategy.SPLIT_MILESTONE
435 |             ]
436 |         }
437 |
438 |         return InsertionPlan(
439 |             selected_option=selected_option,
440 |             new_tasks=new_tasks,
441 |             updated_milestones=updated_milestones,
442 |             impact_summary=impact_summary
443 |         )
444 |
445 |     def _generate_tasks_from_analysis(
446 |         self,
447 |         analysis: FeatureAnalysis,
448 |         option: InsertionOption
449 |     ) -> List[Dict]:
450 |         """Generate task objects from feature analysis"""
451 |         tasks = []
452 |
453 |         for i in range(analysis.estimated_tasks):
454 |             task_id = f"T-ENH-{analysis.feature_id.split('-')[2]}-{i+1:03d}"
455 |
456 |             # Create task based on template
457 |             task = {
458 |                 "id": task_id,
459 |                 "title": f"Implement {analysis.description} - Task {i+1}",
460 |                 "prd_traceability": {
461 |                     "feature_id": analysis.feature_id,
462 |                     "prd_lines": ["ENHANCEMENT"],
463 |                     "original_requirement": analysis.description
464 |                 },
465 |                 "scope_boundaries": {
466 |                     "must_implement": [f"Part {i+1} of {analysis.description}"],
467 |                     "must_not_implement": ["Scope creep beyond enhancement"],
468 |                     "out_of_scope_check": "BLOCK if not in must_implement"
469 |                 },
470 |                 "documentation_context": {
471 |                     "primary_docs": [],
472 |                     "version_locks": {},
473 |                     "forbidden_patterns": ["experimental features"]
474 |                 },
475 |                 "hallucination_guards": {
476 |                     "verify_before_use": ["API signatures", "configuration options"],
477 |                     "forbidden_assumptions": ["no defaults assumed"]
478 |                 },
479 |                 "context_drift_prevention": {
480 |                     "task_boundaries": f"This task ONLY handles part {i+1} of enhancement",
481 |                     "refer_to_other_tasks": {},
482 |                     "max_file_changes": 3,
483 |                     "if_exceeds": "STOP and verify scope"
484 |                 },
485 |                 "milestone_metadata": {
486 |                     "milestone_id": option.target_milestone_id,
487 |                     "milestone_name": self._get_milestone_name(option.target_milestone_id),
488 |                     "is_milestone_critical": False,
489 |                     "can_defer": True,
490 |                     "milestone_position": option.position + i
491 |                 },
492 |                 "enhancement_metadata": {
493 |                     "enhancement_id": f"ENH-{analysis.feature_id}",
494 |                     "added_date": datetime.date.today().isoformat(),
495 |                     "insertion_reason": option.reasoning,
496 |                     "impact_assessment": analysis.complexity
497 |                 }
498 |             }
499 |
500 |             tasks.append(task)
501 |
502 |         return tasks
503 |
504 |     def _create_updated_milestones(
505 |         self,
506 |         option: InsertionOption,
507 |         new_tasks: List[Dict],
508 |         analysis: FeatureAnalysis
509 |     ) -> List[Dict]:
510 |         """Create updated milestone structure with new tasks inserted"""
511 |
512 |         milestones = copy.deepcopy(self.task_graph.get('milestones', []))
513 |         new_task_ids = [task['id'] for task in new_tasks]
514 |
515 |         if option.strategy == InsertionStrategy.NEW_MILESTONE:
516 |             # Create entirely new milestone
517 |             new_milestone = {
518 |                 "id": option.target_milestone_id,
519 |                 "name": f"Enhancement: {analysis.description[:30]}...",
520 |                 "description": f"Enhancement milestone for: {analysis.description}",
521 |                 "tasks": new_task_ids + [f"T-VAL-{option.target_milestone_id}"],
"launch_ready": True, 523 | "validation_criteria": { 524 | "enhancement_works": True, 525 | "no_regressions": True 526 | }, 527 | "human_review_required": True, 528 | "rollback_point": True 529 | } 530 | 531 | # Insert at appropriate position 532 | milestones.insert(option.position, new_milestone) 533 | 534 | else: 535 | # Insert into existing milestone 536 | target_milestone = None 537 | for milestone in milestones: 538 | if milestone.get('id') == option.target_milestone_id: 539 | target_milestone = milestone 540 | break 541 | 542 | if target_milestone: 543 | tasks = target_milestone.get('tasks', []) 544 | # Insert new tasks at specified position 545 | for i, task_id in enumerate(new_task_ids): 546 | tasks.insert(option.position + i, task_id) 547 | target_milestone['tasks'] = tasks 548 | 549 | return milestones 550 | 551 | def _get_milestone_name(self, milestone_id: str) -> str: 552 | """Get milestone name or generate one for new milestones""" 553 | milestone = self._find_milestone(milestone_id) 554 | if milestone: 555 | return milestone.get('name', milestone_id) 556 | return f"Enhancement Milestone {milestone_id}" 557 | 558 | def _get_affected_milestones(self, option: InsertionOption) -> List[str]: 559 | """Get list of milestone IDs affected by this insertion""" 560 | if option.strategy == InsertionStrategy.NEW_MILESTONE: 561 | return [option.target_milestone_id] 562 | else: 563 | return [option.target_milestone_id] 564 | 565 | def main(): 566 | """Test the task inserter""" 567 | import sys 568 | from dependency_analyzer import DependencyAnalyzer 569 | 570 | if len(sys.argv) < 2: 571 | print("Usage: task_inserter.py 'feature description'") 572 | sys.exit(1) 573 | 574 | feature_desc = sys.argv[1] 575 | 576 | # Analyze the feature 577 | analyzer = DependencyAnalyzer() 578 | analysis = analyzer.analyze_feature(feature_desc) 579 | 580 | # Find insertion options 581 | inserter = TaskInserter() 582 | options = inserter.find_insertion_options(analysis) 583 | 584 | print(f"Insertion Options for: {analysis.description}") 585 | print(f"Feature Complexity: {analysis.complexity} ({analysis.estimated_tasks} tasks)") 586 | print() 587 | 588 | for i, option in enumerate(options, 1): 589 | print(f"Option {i}: {option.strategy.value}") 590 | print(f" Target: {option.target_milestone_id}") 591 | print(f" Position: {option.position}") 592 | print(f" Impact Score: {option.impact_score}") 593 | print(f" Dependencies Satisfied: {option.dependencies_satisfied}") 594 | print(f" Reasoning: {option.reasoning}") 595 | print() 596 | 597 | # Create plan for best option 598 | if options: 599 | best_option = options[0] 600 | plan = inserter.create_insertion_plan(analysis, best_option) 601 | print("Recommended Plan:") 602 | print(f" Strategy: {plan.selected_option.strategy.value}") 603 | print(f" Tasks to Add: {len(plan.new_tasks)}") 604 | print(f" Milestones Updated: {len(plan.updated_milestones)}") 605 | print(f" Impact Summary: {plan.impact_summary}") 606 | 607 | if __name__ == "__main__": 608 | main() --------------------------------------------------------------------------------